diff --git a/.travis.yml b/.travis.yml
index 2cb7b8e96658b76419e7a3f34d648eab87457abc..a86f2463e9f23b680748b7dd513eec4d5972e1e8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -49,7 +49,7 @@ matrix:
       ./test-all.sh $TRAVIS_EVENT_TYPE || travis_terminate $?
       cd ${TRAVIS_BUILD_DIR}/tests/pytest
-      ./smoketest.sh -g 2>&1 | tee mem-error-out.txt
+      ./valgrind-test.sh -g 2>&1 | tee mem-error-out.txt
       sleep 1
       # Color setting
@@ -115,7 +115,7 @@ matrix:
       # The command that will be added as an argument to "cov-build" to compile your project for analysis,
       # ** likely specific to your build **
-      build_command: make > /dev/null
+      build_command: make
      # Pattern to match selecting branches that will run analysis. We recommend leaving this set to 'coverity_scan'.
      # Take care in resource usage, and consider the build frequency allowances per
@@ -163,11 +163,9 @@ matrix:
       cd ${TRAVIS_BUILD_DIR}/tests
-      ./test-all.sh
+      ./test-all.sh $TRAVIS_EVENT_TYPE COVER
-      if [ "$?" -ne "0" ]; then
-        travis_terminate $?
-      fi
+      TEST_RESULT=$?
       pkill taosd
       sleep 1
@@ -199,6 +197,9 @@ matrix:
         echo -e "${RED} ## Codecov did not collect coverage report! ## ${NC} "
       fi
+      if [ "$TEST_RESULT" -ne "0" ]; then
+        travis_terminate $?
+      fi
       ;;
     esac
diff --git a/README.md b/README.md
index 158ae040fa0a95e6c2209ea71f8a3e5da6f848cc..0babfe77763812ae1251e6ac0773091e29724a01 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,7 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo
 1. sudo apt install
    build-essential cmake net-tools python-pip python-setuptools python3-pip
-   python3-setuptools valgrind
+   python3-setuptools valgrind psmisc curl
 2. git clone ; cd TDengine
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 61a5fdd311fa613e01ef78b8e36eac641c77f253..6ea1ee6440999d843f2326ffc2567b9ca79957f9 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -365,7 +365,7 @@ void tscInitMsgsFp();
 int tsParseSql(SSqlObj *pSql, bool multiVnodeInsertion);
-void tscProcessMsgFromServer(SRpcMsg *rpcMsg);
+void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet);
 int tscProcessSql(SSqlObj *pSql);
 int tscRenewMeterMeta(SSqlObj *pSql, char *tableId);
diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c
index c1944e96c94372f19e9fc499630cd94083ac30fa..6fb8df24441907fde789748bfeeaa6a6eb67fac3 100644
--- a/src/client/src/tscFunctionImpl.c
+++ b/src/client/src/tscFunctionImpl.c
@@ -1691,10 +1691,7 @@ static void last_function(SQLFunctionCtx *pCtx) {
 }
 static void last_function_f(SQLFunctionCtx *pCtx, int32_t index) {
-  if (pCtx->order == TSDB_ORDER_ASC) {
-    return;
-  }
-
+  assert(pCtx->order != TSDB_ORDER_ASC);
   void *pData = GET_INPUT_CHAR_INDEX(pCtx, index);
   if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
     return;
@@ -2912,7 +2909,7 @@ static void leastsquares_finalizer(SQLFunctionCtx *pCtx) {
 }
 static void date_col_output_function(SQLFunctionCtx *pCtx) {
-  if (pCtx->scanFlag == SUPPLEMENTARY_SCAN) {
+  if (pCtx->scanFlag == REVERSE_SCAN) {
     return;
   }
@@ -2969,11 +2966,12 @@ static void tag_project_function(SQLFunctionCtx *pCtx) {
   char* output = pCtx->aOutputBuf;
   if (pCtx->tag.nType == TSDB_DATA_TYPE_BINARY || pCtx->tag.nType == TSDB_DATA_TYPE_NCHAR) {
-    *(int16_t*) output = pCtx->tag.nLen;
-    output += VARSTR_HEADER_SIZE;
+    varDataSetLen(output, pCtx->tag.nLen);
+    tVariantDump(&pCtx->tag, varDataVal(output), pCtx->outputType);
+  } else {
+    tVariantDump(&pCtx->tag, output, pCtx->outputType);
   }
-  tVariantDump(&pCtx->tag, output, pCtx->outputType);
   pCtx->aOutputBuf += pCtx->outputBytes;
 }
 }
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 4cc03f7ad9d04c4bd48c44d2f0563edecdc394cf..a81ad19e4fb5f753919297d9be3396ab058568fa 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -5563,8 +5563,8 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
         return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
       }
-      *(VarDataLenT*)tagVal = pList->a[i].pVar.nLen;
-      ret = tVariantDump(&(pList->a[i].pVar), tagVal + VARSTR_HEADER_SIZE, pTagSchema[i].type);
+      ret = tVariantDump(&(pList->a[i].pVar), varDataVal(tagVal), pTagSchema[i].type);
+      varDataSetLen(tagVal, pList->a[i].pVar.nLen);
     } else {
       ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type);
     }
@@ -5911,7 +5911,7 @@ int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray*
   }
   if (pSqlExpr->pLeft == NULL) {
-    if (pSqlExpr->nSQLOptr >= TK_TINYINT && pSqlExpr->nSQLOptr <= TK_DOUBLE) {
+    if (pSqlExpr->nSQLOptr >= TK_BOOL && pSqlExpr->nSQLOptr <= TK_STRING) {
       *pExpr = calloc(1, sizeof(tExprNode));
       (*pExpr)->nodeType = TSQL_NODE_VALUE;
       (*pExpr)->pVal = calloc(1, sizeof(tVariant));
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index 119a84e964f70bfa061d1b245e483eaf2210dd25..98cbe9dbdeff69e8de996b23069158b76328dac2 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -221,7 +221,7 @@ int tscSendMsgToServer(SSqlObj *pSql) {
   return TSDB_CODE_SUCCESS;
 }
-void tscProcessMsgFromServer(SRpcMsg *rpcMsg) {
+void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
   SSqlObj *pSql = (SSqlObj *)rpcMsg->handle;
   if (pSql == NULL) {
     tscError("%p sql is already released", pSql->signature);
@@ -245,6 +245,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg) {
     return;
   }
+  if (pCmd->command < TSDB_SQL_MGMT) {
+    if (pIpSet) pSql->ipList = *pIpSet;
+  } else {
+    if (pIpSet) tscMgmtIpSet = *pIpSet;
+  }
+
   if (rpcMsg->pCont == NULL) {
     rpcMsg->code = TSDB_CODE_NETWORK_UNAVAIL;
   } else {
@@ -492,13 +498,15 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
   pMsg += sizeof(pQueryInfo->type);
   // todo valid the vgroupId at the client side
-  if (UTIL_TABLE_IS_SUPERTABLE(pQueryInfo->pTableMetaInfo[0])) {
-    SVgroupsInfo* pVgroupInfo = pQueryInfo->pTableMetaInfo[0]->vgroupList;
-    assert(pVgroupInfo->numOfVgroups == 1); // todo fix me
+  STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
+
+  if (UTIL_TABLE_IS_SUPERTABLE(pTableMetaInfo)) {
+    int32_t vgIndex = pTableMetaInfo->vgroupIndex;
-    pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[0].vgId);
+    SVgroupsInfo* pVgroupInfo = pTableMetaInfo->vgroupList;
+    pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId);
   } else {
-    STableMeta* pTableMeta = pQueryInfo->pTableMetaInfo[0]->pTableMeta;
+    STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
     pRetrieveMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId);
   }
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index d3a81adca6dc8fa48dcbbff8d95960f43e0caa1d..7a1c08d0563ecabd9d4a3cbd00ea622d8e5fe02b 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -1436,8 +1436,8 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
     return;
   } else {  // reach the maximum retry count, abort
     atomic_val_compare_exchange_32(&pState->code, TSDB_CODE_SUCCESS, numOfRows);
-    tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql,
-             numOfRows, subqueryIndex, tstrerror(pState->code));
+    tscError("%p sub:%p retrieve failed,code:%s,orderOfSub:%d failed.no more retry,set global code:%s", pPObj, pSql,
+             tstrerror(numOfRows), subqueryIndex, tstrerror(pState->code));
   }
 }
@@ -1450,7 +1450,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
   }
   // all subqueries are failed
-  tscError("%p retrieve from %d vnode(s) completed,code:%d.FAILED.", pPObj, pState->numOfTotal, pState->code);
+  tscError("%p retrieve from %d vnode(s) completed,code:%s.FAILED.", pPObj, pState->numOfTotal, tstrerror(pState->code));
   pPObj->res.code = pState->code;
   // release allocated resource
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index dd3cb76cbf40876718786dae14dbb5a7b263d8c7..00afb977fd9390e9f063a0dd72a9ac74073d3c9f 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -1675,10 +1675,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, const char* name, ST
   pTableMetaInfo->pTableMeta = pTableMeta;
   if (vgroupList != NULL) {
-    assert(vgroupList->numOfVgroups == 1); // todo fix me
-
     size_t size = sizeof(SVgroupsInfo) + sizeof(SCMVgroupInfo) * vgroupList->numOfVgroups;
-
     pTableMetaInfo->vgroupList = malloc(size);
     memcpy(pTableMetaInfo->vgroupList, vgroupList, size);
   }
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index ec52bcd2ae34acf04806727fddf5a24dfcb260ef..51a5dad4869f3d464d83397157d9fda4482dc6a9 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -115,10 +115,8 @@ static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t o
     case TSDB_DATA_TYPE_BINARY:
     case TSDB_DATA_TYPE_NCHAR:
       return POINTER_SHIFT(row, *(VarDataOffsetT *)POINTER_SHIFT(row, offset));
-      break;
     default:
       return POINTER_SHIFT(row, offset);
-      break;
   }
 }
diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c
index 62d22573ff1981a1b12dc9a4aaaee92f2c3c1a5f..36a7c9880735f8186368ff66c921ad499b31bc73 100644
--- a/src/dnode/src/dnodeMgmt.c
+++ b/src/dnode/src/dnodeMgmt.c
@@ -266,9 +266,12 @@ static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) {
   return taosCfgDynamicOptions(pCfg->config);
 }
+void dnodeUpdateIpSet(SRpcIpSet *pIpSet) {
+  dPrint("mnode IP list is changed, numOfIps:%d inUse:%d", pIpSet->numOfIps, pIpSet->inUse);
+  for (int i = 0; i < pIpSet->numOfIps; ++i) {
+    dPrint("mnode index:%d %s:%u", i, pIpSet->fqdn[i], pIpSet->port[i]);
+  }
-void dnodeUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet) {
-  dTrace("mgmt IP list is changed for ufp is called");
   tsMnodeIpSet = *pIpSet;
 }
diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c
index ea21ed02061ce5aaf52222914b465d7d338229e0..51913d80c4531fabb4c525098b2d53fc0176a3f2 100644
--- a/src/dnode/src/dnodePeer.c
+++ b/src/dnode/src/dnodePeer.c
@@ -29,11 +29,11 @@
 #include "dnodeVWrite.h"
 #include "mnode.h"
-extern void dnodeUpdateIpSet(void *ahandle, SRpcIpSet *pIpSet);
+extern void dnodeUpdateIpSet(SRpcIpSet *pIpSet);
 static void (*dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *);
-static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg);
+static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcIpSet *);
 static void (*dnodeProcessRspMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *rpcMsg);
-static void dnodeProcessRspFromDnode(SRpcMsg *pMsg);
+static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet);
 static void *tsDnodeServerRpc = NULL;
 static void *tsDnodeClientRpc = NULL;
@@ -81,7 +81,7 @@ void dnodeCleanupServer() {
   }
 }
-static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg) {
+static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
   SRpcMsg rspMsg;
   rspMsg.handle = pMsg->handle;
   rspMsg.pCont = NULL;
@@ -119,7 +119,6 @@ int32_t dnodeInitClient() {
   rpcInit.label = "DND-C";
   rpcInit.numOfThreads = 1;
   rpcInit.cfp = dnodeProcessRspFromDnode;
-  rpcInit.ufp = dnodeUpdateIpSet;
   rpcInit.sessions = 100;
   rpcInit.connType = TAOS_CONN_CLIENT;
   rpcInit.idleTime = tsShellActivityTimer * 1000;
@@ -145,9 +144,10 @@ void dnodeCleanupClient() {
   }
 }
-static void dnodeProcessRspFromDnode(SRpcMsg *pMsg) {
+static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
   if (dnodeProcessRspMsgFp[pMsg->msgType]) {
+    if (pMsg->msgType == TSDB_MSG_TYPE_DM_STATUS_RSP && pIpSet) dnodeUpdateIpSet(pIpSet);
     (*dnodeProcessRspMsgFp[pMsg->msgType])(pMsg);
   } else {
     dError("RPC %p, msg:%s is not processed", pMsg->handle, taosMsg[pMsg->msgType]);
diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c
index 28679262faa7d38eebf8c49317b5df272ae2625e..dc0efd405f5b89056b16bec090b4fdd81c31e710 100644
--- a/src/dnode/src/dnodeShell.c
+++ b/src/dnode/src/dnodeShell.c
@@ -28,7 +28,7 @@
 #include "dnodeShell.h"
 static void (*dnodeProcessShellMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *);
-static void dnodeProcessMsgFromShell(SRpcMsg *pMsg);
+static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcIpSet *);
 static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey);
 static void * tsDnodeShellRpc = NULL;
 static int32_t tsDnodeQueryReqNum = 0;
@@ -106,7 +106,7 @@ void dnodeCleanupShell() {
   }
 }
-void dnodeProcessMsgFromShell(SRpcMsg *pMsg) {
+void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
   SRpcMsg rpcMsg;
   rpcMsg.handle = pMsg->handle;
   rpcMsg.pCont = NULL;
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 571b551368c8139a7527965deb1549ac479866a4..33a0e4f2c6e12b7e67e6ba58fb833846f05e9044 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -36,6 +36,11 @@ extern "C" {
 typedef int32_t VarDataOffsetT;
 typedef int16_t VarDataLenT;
+typedef struct tstr {
+  VarDataLenT len;
+  char data[];
+} tstr;
+
 #define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
 #define varDataLen(v) ((VarDataLenT *)(v))[0]
@@ -74,10 +79,10 @@ extern const int32_t TYPE_BYTES[11];
 #define TSDB_DATA_NULL_STR "NULL"
 #define TSDB_DATA_NULL_STR_L "null"
-#define TSDB_TRUE 1
-#define TSDB_FALSE 0
-#define TSDB_OK 0
-#define TSDB_ERR -1
+#define TSDB_TRUE 1
+#define TSDB_FALSE 0
+#define TSDB_OK 0
+#define TSDB_ERR -1
 #define TS_PATH_DELIMITER "."
diff --git a/src/inc/trpc.h b/src/inc/trpc.h
index 8b082b65b8ad5c1e951e216c9b9192bf300ffa40..eff210433f7d7bcc2f4a5ad1d12bd88ed59581be 100644
--- a/src/inc/trpc.h
+++ b/src/inc/trpc.h
@@ -66,10 +66,7 @@ typedef struct {
   char *ckey; // ciphering key
   // call back to process incoming msg, code shall be ignored by server app
-  void (*cfp)(SRpcMsg *);
-
-  // call back to process notify the ipSet changes, for client app only
-  void (*ufp)(void *ahandle, SRpcIpSet *pIpSet);
+  void (*cfp)(SRpcMsg *, SRpcIpSet *);
   // call back to retrieve the client auth info, for server app only
   int (*afp)(char *tableId, char *spi, char *encrypt, char *secret, char *ckey);
diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h
index ab5d26e0ddd36fcdfdd8b94cb6163c9b8f280ff3..4468ee4262c7fda28b42c89b1b184bcc6703a43f 100644
--- a/src/inc/tsdb.h
+++ b/src/inc/tsdb.h
@@ -102,7 +102,7 @@ int tsdbTableSetSName(STableCfg *config, char *sname, bool dup);
 void tsdbClearTableCfg(STableCfg *config);
 int32_t tsdbGetTableTagVal(TsdbRepoT *repo, STableId* id, int32_t colId, int16_t *type, int16_t *bytes, char **val);
-int32_t tsdbGetTableName(TsdbRepoT *repo, STableId* id, char** name);
+char* tsdbGetTableName(TsdbRepoT *repo, const STableId* id, int16_t* bytes);
 int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg);
 int tsdbDropTable(TsdbRepoT *pRepo, STableId tableId);
diff --git a/src/mnode/inc/mgmtDef.h b/src/mnode/inc/mgmtDef.h
index ba71f9373b54597a17d59e75f4e3dadc0c5d6310..3ac2efb83b98f053d7f5fecb6885cb9ec516df28 100644
--- a/src/mnode/inc/mgmtDef.h
+++ b/src/mnode/inc/mgmtDef.h
@@ -85,8 +85,7 @@ typedef struct SSuperTableObj {
   int32_t numOfTables;
   int16_t nextColId;
   SSchema * schema;
-  int32_t vgLen;
-  int32_t * vgList;
+  void * vgHash;
 } SSuperTableObj;
 typedef struct {
diff --git a/src/mnode/src/mgmtDb.c b/src/mnode/src/mgmtDb.c
index 4f54a85baa79fa7b386b63b4484e3f821dabe6a0..2f17df92fa70a218b666a18557ceb4f8fc15e737 100644
--- a/src/mnode/src/mgmtDb.c
+++ b/src/mnode/src/mgmtDb.c
@@ -297,8 +297,10 @@ static int32_t mgmtCreateDb(SAcctObj *pAcct, SCMCreateDbMsg *pCreate) {
   if (pDb != NULL) {
     mgmtDecDbRef(pDb);
     if (pCreate->ignoreExist) {
+      mTrace("db:%s, already exist, ignore exist is set", pCreate->db);
       return TSDB_CODE_SUCCESS;
     } else {
+      mError("db:%s, is already exist, ignore exist not set", pCreate->db);
      return TSDB_CODE_DB_ALREADY_EXIST;
     }
   }
@@ -751,6 +753,8 @@ static void mgmtProcessCreateDbMsg(SQueuedMsg *pMsg) {
   code = mgmtCreateDb(pMsg->pUser->pAcct, pCreate);
   if (code == TSDB_CODE_SUCCESS) {
     mLPrint("db:%s, is created by %s", pCreate->db, pMsg->pUser->user);
+  } else {
+    mError("db:%s, failed to create, reason:%s", pCreate->db, tstrerror(code));
   }
 }
diff --git a/src/mnode/src/mgmtTable.c b/src/mnode/src/mgmtTable.c
index bfe357cf7c74069e5cb41f569f9129d3d26c1376..49311b0112b6ecf7049e462aeea74153fe1be3b5 100644
--- a/src/mnode/src/mgmtTable.c
+++ b/src/mnode/src/mgmtTable.c
@@ -24,6 +24,7 @@
 #include "tname.h"
 #include "tidpool.h"
 #include "tglobal.h"
+#include "hash.h"
 #include "dnode.h"
 #include "mgmtDef.h"
 #include "mgmtInt.h"
@@ -363,39 +364,35 @@ static void mgmtCleanUpChildTables() {
 }
 static void mgmtAddTableIntoStable(SSuperTableObj *pStable, SChildTableObj *pCtable) {
-  if (pStable->vgLen == 0) {
-    pStable->vgLen = 8;
-    pStable->vgList = calloc(pStable->vgLen, sizeof(int32_t));
-  }
-
-  bool find = false;
-  int32_t pos = 0;
-  for (pos = 0; pos < pStable->vgLen; ++pos) {
-    if (pStable->vgList[pos] == 0) break;
-    if (pStable->vgList[pos] == pCtable->vgId) {
-      find = true;
-      break;
-    }
-  }
+  pStable->numOfTables++;
-
-  if (!find) {
-    if (pos >= pStable->vgLen) {
-      pStable->vgLen *= 2;
-      pStable->vgList = realloc(pStable->vgList, pStable->vgLen * sizeof(int32_t));
-    }
-    pStable->vgList[pos] = pCtable->vgId;
+  if (pStable->vgHash == NULL) {
+    pStable->vgHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false);
   }
-  pStable->numOfTables++;
+  if (pStable->vgHash != NULL) {
+    taosHashPut(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId));
+  }
 }
 static void mgmtRemoveTableFromStable(SSuperTableObj *pStable, SChildTableObj *pCtable) {
   pStable->numOfTables--;
+
+  if (pStable->vgHash == NULL) return;
+
+  SVgObj *pVgroup = mgmtGetVgroup(pCtable->vgId);
+  if (pVgroup != NULL) {
+    taosHashRemove(pStable->vgHash, (char *)&pCtable->vgId, sizeof(pCtable->vgId));
+  }
+  mgmtDecVgroupRef(pVgroup);
 }
 static void mgmtDestroySuperTable(SSuperTableObj *pStable) {
+  if (pStable->vgHash != NULL) {
+    taosHashCleanup(pStable->vgHash);
+    pStable->vgHash = NULL;
+  }
   tfree(pStable->schema);
-  tfree(pStable->vgList)
   tfree(pStable);
 }
@@ -434,7 +431,7 @@ static int32_t mgmtSuperTableActionUpdate(SSdbOper *pOper) {
     void *oldSchema = pTable->schema;
     memcpy(pTable, pNew, pOper->rowSize);
     pTable->schema = pNew->schema;
-    free(pNew->vgList);
+    free(pNew->vgHash);
     free(pNew);
     free(oldSchema);
   }
@@ -797,26 +794,26 @@ static void mgmtProcessCreateSuperTableMsg(SQueuedMsg *pMsg) {
 static void mgmtProcessDropSuperTableMsg(SQueuedMsg *pMsg) {
   SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
   if (pStable->numOfTables != 0) {
-    mgmtDropAllChildTablesInStable(pStable);
-    for (int32_t vg = 0; vg < pStable->vgLen; ++vg) {
-      int32_t vgId = pStable->vgList[vg];
-      if (vgId == 0) break;
-
-      SVgObj *pVgroup = mgmtGetVgroup(vgId);
+    SHashMutableIterator *pIter = taosHashCreateIter(pStable->vgHash);
+    while (taosHashIterNext(pIter)) {
+      int32_t *pVgId = taosHashIterGet(pIter);
+      SVgObj *pVgroup = mgmtGetVgroup(*pVgId);
       if (pVgroup == NULL) break;
-
+
       SMDDropSTableMsg *pDrop = rpcMallocCont(sizeof(SMDDropSTableMsg));
       pDrop->contLen = htonl(sizeof(SMDDropSTableMsg));
-      pDrop->vgId = htonl(vgId);
+      pDrop->vgId = htonl(pVgroup->vgId);
       pDrop->uid = htobe64(pStable->uid);
       mgmtExtractTableName(pStable->info.tableId, pDrop->tableId);
-      mPrint("stable:%s, send drop stable msg to vgId:%d", pStable->info.tableId, vgId);
+      mPrint("stable:%s, send drop stable msg to vgId:%d", pStable->info.tableId, pVgroup->vgId);
       SRpcIpSet ipSet = mgmtGetIpSetFromVgroup(pVgroup);
       SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SMDDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE};
       dnodeSendMsgToDnode(&ipSet, &rpcMsg);
       mgmtDecVgroupRef(pVgroup);
     }
+
+    mgmtDropAllChildTablesInStable(pStable);
   }
   SSdbOper oper = {
@@ -1243,59 +1240,58 @@ static void mgmtGetSuperTableMeta(SQueuedMsg *pMsg) {
 static void mgmtProcessSuperTableVgroupMsg(SQueuedMsg *pMsg) {
   SCMSTableVgroupMsg *pInfo = pMsg->pCont;
   int32_t numOfTable = htonl(pInfo->numOfTables);
-
-  char* name = (char*) pInfo + sizeof(struct SCMSTableVgroupMsg);
-  SCMSTableVgroupRspMsg *pRsp = NULL;
-
-  // todo set the initial size to be 10, fix me
-  int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + (sizeof(SCMVgroupInfo) * 10 + sizeof(SVgroupsInfo))*numOfTable;
-
-  pRsp = rpcMallocCont(contLen);
+
+  // reserve space
+  int32_t contLen = sizeof(SCMSTableVgroupRspMsg) + 32 * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo);
+  for (int32_t i = 0; i < numOfTable; ++i) {
+    char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
+    SSuperTableObj *pTable = mgmtGetSuperTable(stableName);
+    if (pTable->vgHash != NULL) {
+      contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo));
+    }
+    mgmtDecTableRef(pTable);
+  }
+
+  SCMSTableVgroupRspMsg *pRsp = rpcMallocCont(contLen);
   if (pRsp == NULL) {
     mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_SERV_OUT_OF_MEMORY);
     return;
   }
-
+
   pRsp->numOfTables = htonl(numOfTable);
   char* msg = (char*) pRsp + sizeof(SCMSTableVgroupRspMsg);
-
-  for(int32_t i = 0; i < numOfTable; ++i) {
-    SSuperTableObj *pTable = mgmtGetSuperTable(name);
-
-    pMsg->pTable = (STableObj *)pTable;
-    if (pMsg->pTable == NULL) {
-      mgmtSendSimpleResp(pMsg->thandle, TSDB_CODE_INVALID_TABLE);
-      return;
-    }
-
-    SVgroupsInfo* pVgroup = (SVgroupsInfo*) msg;
-
-    int32_t vg = 0;
-    for (; vg < pTable->vgLen; ++vg) {
-      int32_t vgId = pTable->vgList[vg];
-      if (vgId == 0) break;
-
-      SVgObj *vgItem = mgmtGetVgroup(vgId);
-      if (vgItem == NULL) break;
-
-      pVgroup->vgroups[vg].vgId = htonl(vgId);
-      for (int32_t vn = 0; vn < vgItem->numOfVnodes; ++vn) {
-        SDnodeObj *pDnode = vgItem->vnodeGid[vn].pDnode;
+
+  for (int32_t i = 0; i < numOfTable; ++i) {
+    char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
+    SSuperTableObj *pTable = mgmtGetSuperTable(stableName);
+    SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;
+
+    SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash);
+    int32_t vgSize = 0;
+    while (taosHashIterNext(pIter)) {
+      int32_t *pVgId = taosHashIterGet(pIter);
+      SVgObj * pVgroup = mgmtGetVgroup(*pVgId);
+      if (pVgroup == NULL) continue;
+
+      pVgroupInfo->vgroups[vgSize].vgId = htonl(pVgroup->vgId);
+      for (int32_t vn = 0; vn < pVgroup->numOfVnodes; ++vn) {
+        SDnodeObj *pDnode = pVgroup->vnodeGid[vn].pDnode;
         if (pDnode == NULL) break;
-
-        strncpy(pVgroup->vgroups[vg].ipAddr[vn].fqdn, pDnode->dnodeFqdn, tListLen(pDnode->dnodeFqdn));
-        pVgroup->vgroups[vg].ipAddr[vn].port = htons(tsDnodeShellPort);
-
-        pVgroup->vgroups[vg].numOfIps++;
+
+        strncpy(pVgroupInfo->vgroups[vgSize].ipAddr[vn].fqdn, pDnode->dnodeFqdn, tListLen(pDnode->dnodeFqdn));
+        pVgroupInfo->vgroups[vgSize].ipAddr[vn].port = htons(tsDnodeShellPort);
+
+        pVgroupInfo->vgroups[vgSize].numOfIps++;
       }
-
-      mgmtDecVgroupRef(vgItem);
+
+      vgSize++;
+      mgmtDecVgroupRef(pVgroup);
    }
-
-    pVgroup->numOfVgroups = htonl(vg);
-
+
+    pVgroupInfo->numOfVgroups = htonl(vgSize);
+
     // one table is done, try the next table
-    msg += sizeof(SVgroupsInfo) + vg * sizeof(SCMVgroupInfo);
+    msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo);
   }
   SRpcMsg rpcRsp = {0};
diff --git a/src/query/inc/queryExecutor.h b/src/query/inc/queryExecutor.h
index 906dadb31798b2a48b0ebb142c613511a49dee65..2088e5a49ddd4d249effdc915234c70efff961cc 100644
--- a/src/query/inc/queryExecutor.h
+++ b/src/query/inc/queryExecutor.h
@@ -110,6 +110,14 @@ typedef struct STableQueryInfo { // todo merge with the STableQueryInfo struct
   SWindowResInfo windowResInfo;
 } STableQueryInfo;
+typedef struct SQueryCostSummary {
+} SQueryCostSummary;
+
+typedef struct SGroupItem {
+  STableId id;
+  STableQueryInfo* info;
+} SGroupItem;
+
 typedef struct SQuery {
   int16_t numOfCols;
   int16_t numOfTags;
@@ -131,17 +139,15 @@ typedef struct SQuery {
   SColumnInfo* tagColList;
   int32_t numOfFilterCols;
   int64_t* defaultVal;
-  TSKEY lastKey;
+//  TSKEY lastKey;
   uint32_t status; // query status
   SResultRec rec;
   int32_t pos;
   SData** sdata;
+  STableQueryInfo* current;
   SSingleColumnFilterInfo* pFilterInfo;
 } SQuery;
-typedef struct SQueryCostSummary {
-} SQueryCostSummary;
-
 typedef struct SQueryRuntimeEnv {
   SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
   SQuery* pQuery;
diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h
index a92f401323ad4d63cf9141d9fb7c0cdb569f6628..a11c03f2c0da8db902d6f24ba70fea56b37385f2 100644
--- a/src/query/inc/tsqlfunction.h
+++ b/src/query/inc/tsqlfunction.h
@@ -104,7 +104,7 @@ extern "C" {
 enum {
   MASTER_SCAN = 0x0u,
-  SUPPLEMENTARY_SCAN = 0x1u,
+  REVERSE_SCAN = 0x1u,
   REPEAT_SCAN = 0x2u, //repeat scan belongs to the master scan
   FIRST_STAGE_MERGE = 0x10u,
   SECONDARY_STAGE_MERGE = 0x20u,
diff --git a/src/query/src/qast.c b/src/query/src/qast.c
index 98682ce778976a1472317d8e8adf1c289411f862..43bdf57ed74426a4e100623c4a16c48cf5564b81 100644
--- a/src/query/src/qast.c
+++ b/src/query/src/qast.c
@@ -540,8 +540,11 @@ static int32_t setQueryCond(tQueryInfo *queryColInfo, SQueryCond* pCond) {
 static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) {
   SSkipListIterator* iter = NULL;
+
   SQueryCond cond = {0};
-  setQueryCond(pQueryInfo, &cond);
+  if (setQueryCond(pQueryInfo, &cond) != TSDB_CODE_SUCCESS) {
+    //todo handle error
+  }
   if (cond.start != NULL) {
     iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->keyInfo.type, TSDB_ORDER_ASC);
@@ -552,18 +555,18 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr
   if (cond.start != NULL) {
     int32_t optr = cond.start->optr;
-    if (optr == TSDB_RELATION_EQUAL) {
+    if (optr == TSDB_RELATION_EQUAL) { // equals
       while(tSkipListIterNext(iter)) {
         SSkipListNode* pNode = tSkipListIterGet(iter);
         int32_t ret = pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v);
-        if (ret == 0) {
-          taosArrayPush(result, SL_GET_NODE_DATA(pNode));
-        } else {
+        if (ret != 0) {
           break;
         }
+
+        taosArrayPush(result, SL_GET_NODE_DATA(pNode));
       }
-    } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) {
+    } else if (optr == TSDB_RELATION_GREATER || optr == TSDB_RELATION_GREATER_EQUAL) { // greater equal
       bool comp = true;
       int32_t ret = 0;
@@ -582,8 +585,33 @@ static void tQueryIndexColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArr
           comp = false;
         }
       }
-    } else if (optr == TSDB_RELATION_NOT_EQUAL) {
-      assert(0);
+    } else if (optr == TSDB_RELATION_NOT_EQUAL) { // not equal
+      bool comp = true;
+
+      while(tSkipListIterNext(iter)) {
+        SSkipListNode* pNode = tSkipListIterGet(iter);
+        comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0);
+        if (comp) {
+          continue;
+        }
+
+        taosArrayPush(result, SL_GET_NODE_DATA(pNode));
+      }
+
+      tSkipListDestroyIter(iter);
+
+      comp = true;
+      iter = tSkipListCreateIterFromVal(pSkipList, (char*) cond.start->v, pSkipList->keyInfo.type, TSDB_ORDER_DESC);
+      while(tSkipListIterNext(iter)) {
+        SSkipListNode* pNode = tSkipListIterGet(iter);
+        comp = comp && (pQueryInfo->compare(SL_GET_NODE_KEY(pSkipList, pNode), cond.start->v) == 0);
+        if (comp) {
+          continue;
+        }
+
+        taosArrayPush(result, SL_GET_NODE_DATA(pNode));
+      }
+
     } else {
       assert(0);
     }
@@ -773,8 +801,6 @@ static void tSQLBinaryTraverseOnSkipList(tExprNode *pExpr, SArray *pResult, SSki
   tSkipListDestroyIter(iter);
 }
-
-
 static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo, SArray* result) {
   SSkipListIterator* iter = tSkipListCreateIter(pSkipList);
@@ -785,7 +811,7 @@ static void tQueryIndexlessColumn(SSkipList* pSkipList, tQueryInfo* pQueryInfo,
     char* pTable = SL_GET_NODE_DATA(pNode);
     //todo refactor:
-    char* name = (*(STable**) pTable)->name;
+    tstr* name = ((STableIndexElem*) pTable)->pTable->name;
     //    char* name = NULL;
     //    tsdbGetTableName(tsdb, pTable, &name);
diff --git a/src/query/src/queryExecutor.c b/src/query/src/queryExecutor.c
index 0ffc665e553c6f19046057d0a79a7be1678ef693..52cfa582d080fed6816fd32f3d9457f5a946e851 100644
--- a/src/query/src/queryExecutor.c
+++ b/src/query/src/queryExecutor.c
@@ -40,9 +40,9 @@
 #define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
 #define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
-#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == SUPPLEMENTARY_SCAN)
+#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
 #define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
-#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = SUPPLEMENTARY_SCAN)
+#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
 #define GET_QINFO_ADDR(x) ((void *)((char *)(x)-offsetof(SQInfo, runtimeEnv)))
@@ -96,11 +96,6 @@ typedef struct {
   STSCursor cur;
 } SQueryStatusInfo;
-typedef struct SGroupItem {
-  STableId id;
-  STableQueryInfo* info;
-} SGroupItem;
-
 static void setQueryStatus(SQuery *pQuery, int8_t status);
 static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
@@ -121,7 +116,7 @@ static bool hasMainOutput(SQuery *pQuery);
 static void createTableQueryInfo(SQInfo *pQInfo);
 static void buildTagQueryResult(SQInfo *pQInfo);
-static int32_t setAdditionalInfo(SQInfo *pQInfo, STableId *pTaleId, STableQueryInfo *pTableQueryInfo);
+static int32_t setAdditionalInfo(SQInfo *pQInfo, STableId *pTableId, STableQueryInfo *pTableQueryInfo);
 static int32_t flushFromResultBuf(SQInfo *pQInfo);
 bool getNeighborPoints(SQInfo *pQInfo, void *pMeterObj, SPointInterpoSupporter *pPointInterpSupporter) {
@@ -428,9 +423,10 @@ static bool hasNullValue(SQuery *pQuery, int32_t col, SDataBlockInfo *pDataBlock
     return false;
   }
-  *pColStatis = NULL;
   if (pStatis != NULL) {
     *pColStatis = getStatisInfo(pQuery, pStatis, pDataBlockInfo, col);
+  } else {
+    *pColStatis = NULL;
   }
   if ((*pColStatis) != NULL && (*pColStatis)->numOfNull == 0) {
@@ -620,8 +616,8 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
   }
   // query completed
-  if ((lastKey >= pQuery->window.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
-      (lastKey <= pQuery->window.ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
+  if ((lastKey >= pQuery->current->win.ekey && QUERY_IS_ASC_QUERY(pQuery)) ||
+      (lastKey <= pQuery->current->win.ekey && !QUERY_IS_ASC_QUERY(pQuery))) {
     closeAllTimeWindow(pWindowResInfo);
     pWindowResInfo->curIndex = pWindowResInfo->size - 1;
@@ -661,22 +657,22 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
       setQueryStatus(pQuery, QUERY_RESBUF_FULL);
     }
-    qTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pQuery), pWindowResInfo->size, n);
+    qTrace("QInfo:%p total window:%d, closed:%d", GET_QINFO_ADDR(pRuntimeEnv), pWindowResInfo->size, n);
   }
   assert(pWindowResInfo->prevSKey != 0);
 }
 static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo, TSKEY *pPrimaryColumn,
-                                        int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn,
-                                        bool updateLastKey) {
+                                        int32_t startPos, TSKEY ekey, __block_search_fn_t searchFn, bool updateLastKey) {
   assert(startPos >= 0 && startPos < pDataBlockInfo->rows);
   int32_t num = -1;
   int32_t order = pQuery->order.order;
-
   int32_t step = GET_FORWARD_DIRECTION_FACTOR(order);
+  STableQueryInfo* item = pQuery->current;
+
   if (QUERY_IS_ASC_QUERY(pQuery)) {
     if (ekey < pDataBlockInfo->window.ekey) {
       num = getForwardStepsInBlock(pDataBlockInfo->rows, searchFn, ekey, startPos, order, pPrimaryColumn);
@@ -684,13 +680,13 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
         assert(ekey < pPrimaryColumn[startPos]);
       } else {
         if (updateLastKey) {
-          pQuery->lastKey = pPrimaryColumn[startPos + (num - 1)] + step;
+          item->lastKey = pPrimaryColumn[startPos + (num - 1)] + step;
         }
       }
     } else {
       num = pDataBlockInfo->rows - startPos;
       if (updateLastKey) {
-        pQuery->lastKey = pDataBlockInfo->window.ekey + step;
+        item->lastKey = pDataBlockInfo->window.ekey + step;
       }
     }
   } else { // desc
@@ -700,13 +696,13 @@ static int32_t getNumOfRowsInTimeWindow(SQuery *pQuery, SDataBlockInfo *pDataBlo
         assert(ekey > pPrimaryColumn[startPos]);
       } else {
         if (updateLastKey) {
-          pQuery->lastKey = pPrimaryColumn[startPos - (num - 1)] + step;
+          item->lastKey = pPrimaryColumn[startPos - (num - 1)] + step;
        }
       }
     } else {
       num = startPos + 1;
       if (updateLastKey) {
-        pQuery->lastKey = pDataBlockInfo->window.skey + step;
+        item->lastKey = pDataBlockInfo->window.skey + step;
       }
     }
   }
@@ -820,7 +816,21 @@ static TSKEY reviseWindowEkey(SQuery *pQuery, STimeWindow *pWindow) {
   return ekey;
 }
-char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size,
+//todo binary search
+static void* getDataBlockImpl(SArray* pDataBlock, int32_t colId) {
+  int32_t numOfCols = taosArrayGetSize(pDataBlock);
+
+  for (int32_t i = 0; i < numOfCols; ++i) {
+    SColumnInfoData *p = taosArrayGet(pDataBlock, i);
+    if (colId == p->info.colId) {
+      return p->pData;
+    }
+  }
+
+  return NULL;
+}
+
+static char *getDataBlock(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int32_t col, int32_t size,
                     SArray *pDataBlock) {
   char *dataBlock = NULL;
   SQuery *pQuery = pRuntimeEnv->pQuery;
@@ -867,20 +877,7 @@ char *getDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, SArithmeticSupport *sas, int3
     if (TSDB_COL_IS_TAG(pCol->flag) || pDataBlock == NULL) {
       dataBlock = NULL;
     } else {
-      /*
-       * the colIndex is acquired from the first meter of all qualified meters in this vnode during query prepare
-       * stage, the remain meter may not have the required column in cache actually. So, the validation of required
-       * column in cache with the corresponding meter schema is reinforced.
- */ - int32_t numOfCols = taosArrayGetSize(pDataBlock); - - for (int32_t i = 0; i < numOfCols; ++i) { - SColumnInfoData *p = taosArrayGet(pDataBlock, i); - if (pCol->colId == p->info.colId) { - dataBlock = p->pData; - break; - } - } + dataBlock = getDataBlockImpl(pDataBlock, pCol->colId); } } @@ -904,8 +901,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * SQuery * pQuery = pRuntimeEnv->pQuery; SColumnInfoData *pColInfo = NULL; - TSKEY * primaryKeyCol = NULL; - + + TSKEY *primaryKeyCol = NULL; if (pDataBlock != NULL) { pColInfo = taosArrayGet(pDataBlock, 0); primaryKeyCol = (TSKEY *)(pColInfo->pData); @@ -919,7 +916,7 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * SDataStatis *tpField = NULL; bool hasNull = hasNullValue(pQuery, k, pDataBlockInfo, pStatis, &tpField); - char *dataBlock = getDataBlocks(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); + char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); setExecParams(pQuery, &pCtx[k], dataBlock, primaryKeyCol, pDataBlockInfo->rows, functionId, tpField, hasNull, &sasArray[k], pRuntimeEnv->scanFlag); @@ -1095,15 +1092,20 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { static bool functionNeedToExecute(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t functionId) { SResultInfo *pResInfo = GET_RES_INFO(pCtx); - + SQuery* pQuery = pRuntimeEnv->pQuery; + if (pResInfo->complete || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) { return false; } + if (functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_LAST) { + return !QUERY_IS_ASC_QUERY(pQuery); + } else if (functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST) { + return QUERY_IS_ASC_QUERY(pQuery); + } + // in the supplementary scan, only the following functions need to be executed - if (IS_REVERSE_SCAN(pRuntimeEnv) && - !(functionId == TSDB_FUNC_LAST_DST || functionId == TSDB_FUNC_FIRST_DST || functionId == TSDB_FUNC_FIRST || - functionId == TSDB_FUNC_LAST || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { + if (IS_REVERSE_SCAN(pRuntimeEnv)) {// && (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS)) { return false; } @@ -1115,9 +1117,10 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; SQuery *pQuery = pRuntimeEnv->pQuery; + STableQueryInfo* item = pQuery->current; + TSKEY *primaryKeyCol = (TSKEY*) ((SColumnInfoData *)taosArrayGet(pDataBlock, 0))->pData; - - bool groupbyStateValue = isGroupbyNormalCol(pQuery->pGroupbyExpr); + bool groupbyStateValue = isGroupbyNormalCol(pQuery->pGroupbyExpr); SArithmeticSupport *sasArray = calloc((size_t)pQuery->numOfOutput, sizeof(SArithmeticSupport)); int16_t type = 0; @@ -1134,7 +1137,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SDataStatis *pColStatis = NULL; bool hasNull = hasNullValue(pQuery, k, pDataBlockInfo, pStatis, &pColStatis); - char *dataBlock = getDataBlocks(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); + char *dataBlock = getDataBlock(pRuntimeEnv, &sasArray[k], k, pDataBlockInfo->rows, pDataBlock); setExecParams(pQuery, &pCtx[k], dataBlock, primaryKeyCol, pDataBlockInfo->rows, functionId, pColStatis, hasNull, &sasArray[k], pRuntimeEnv->scanFlag); @@ -1143,7 +1146,8 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS // set the input column 
data for (int32_t k = 0; k < pQuery->numOfFilterCols; ++k) { SSingleColumnFilterInfo *pFilterInfo = &pQuery->pFilterInfo[k]; - pFilterInfo->pData = getDataBlocks(pRuntimeEnv, &sasArray[k], pFilterInfo->info.colId, pDataBlockInfo->rows, pDataBlock); + pFilterInfo->pData = getDataBlockImpl(pDataBlock, pFilterInfo->info.colId); + assert(pFilterInfo->pData != NULL); } int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -1157,10 +1161,10 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } int32_t j = 0; - TSKEY lastKey = -1; - + int32_t offset = -1; + for (j = 0; j < pDataBlockInfo->rows; ++j) { - int32_t offset = GET_COL_DATA_POS(pQuery, j, step); + offset = GET_COL_DATA_POS(pQuery, j, step); if (pRuntimeEnv->pTSBuf != NULL) { int32_t r = doTSJoinFilter(pRuntimeEnv, offset); @@ -1194,7 +1198,6 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS SWindowStatus *pStatus = getTimeWindowResStatus(pWindowResInfo, curTimeWindow(pWindowResInfo)); doRowwiseApplyFunctions(pRuntimeEnv, pStatus, &win, offset); - lastKey = ts; STimeWindow nextWin = win; int32_t index = pWindowResInfo->curIndex; @@ -1223,17 +1226,14 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } else { // other queries // decide which group this rows belongs to according to current state value if (groupbyStateValue) { - char *stateVal = groupbyColumnData + bytes * offset; + char *val = groupbyColumnData + bytes * offset; - int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, stateVal, type, bytes); + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, val, type, bytes); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code continue; } } - // update the lastKey - lastKey = primaryKeyCol[offset]; - // all startOffset are identical offset -= pCtx[0].startOffset; @@ -1254,7 +1254,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } } - pQuery->lastKey = lastKey + step; + item->lastKey = primaryKeyCol[offset] + step; // todo refactor: extract method for(int32_t i = 0; i < pQuery->numOfOutput; ++i) { @@ -1269,10 +1269,12 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS } static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pDataBlockInfo, - SDataStatis *pStatis, __block_search_fn_t searchFn, - SWindowResInfo *pWindowResInfo, SArray *pDataBlock) { + SDataStatis *pStatis, __block_search_fn_t searchFn, SArray *pDataBlock) { SQuery *pQuery = pRuntimeEnv->pQuery; - + + STableQueryInfo* pTableQInfo = pQuery->current; + SWindowResInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo; + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); } else { @@ -1280,7 +1282,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl } TSKEY lastKey = QUERY_IS_ASC_QUERY(pQuery) ? 
pDataBlockInfo->window.ekey : pDataBlockInfo->window.skey; - pQuery->lastKey = lastKey + GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + pTableQInfo->lastKey = lastKey + GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo); @@ -1746,73 +1748,6 @@ static UNUSED_FUNC bool doSetDataInfo(SQInfo *pQInfo, SPointInterpoSupporter *pP } } -// TODO refactor code, the best way to implement the last_row is utilizing the iterator -bool normalizeUnBoundLastRowQuery(SQInfo *pQInfo, SPointInterpoSupporter *pPointInterpSupporter) { -#if 0 - SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - - SQuery * pQuery = pRuntimeEnv->pQuery; - SMeterObj *pMeterObj = pRuntimeEnv->pTabObj; - - assert(!QUERY_IS_ASC_QUERY(pQuery) && notHasQueryTimeRange(pQuery)); - __block_search_fn_t searchFn = vnodeSearchKeyFunc[pMeterObj->searchAlgorithm]; - - TSKEY lastKey = -1; - - pQuery->fileId = -1; - vnodeFreeFieldsEx(pRuntimeEnv); - - // keep in-memory cache status in local variables in case that it may be changed by write operation - getBasicCacheInfoSnapshot(pQuery, pMeterObj->pCache, pMeterObj->vnode); - - SCacheInfo *pCacheInfo = (SCacheInfo *)pMeterObj->pCache; - if (pCacheInfo != NULL && pCacheInfo->cacheBlocks != NULL && pQuery->numOfBlocks > 0) { - pQuery->fileId = -1; - TSKEY key = pMeterObj->lastKey; - - pQuery->window.skey = key; - pQuery->window.ekey = key; - pQuery->lastKey = pQuery->window.skey; - - /* - * cache block may have been flushed to disk, and no data in cache anymore. - * So, copy cache block to local buffer is required. - */ - lastKey = getQueryStartPositionInCache(pRuntimeEnv, &pQuery->slot, &pQuery->pos, false); - if (lastKey < 0) { // data has been flushed to disk, try again search in file - lastKey = getQueryPositionForCacheInvalid(pRuntimeEnv, searchFn); - - if (Q_STATUS_EQUAL(pQuery->status, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) { - return false; - } - } - } else { // no data in cache, try file - TSKEY key = pMeterObj->lastKeyOnFile; - - pQuery->window.skey = key; - pQuery->window.ekey = key; - pQuery->lastKey = pQuery->window.skey; - - bool ret = getQualifiedDataBlock(pMeterObj, pRuntimeEnv, QUERY_RANGE_LESS_EQUAL, searchFn); - if (!ret) { // no data in file, return false; - return false; - } - - lastKey = getTimestampInDiskBlock(pRuntimeEnv, pQuery->pos); - } - - assert(lastKey <= pQuery->window.skey); - - pQuery->window.skey = lastKey; - pQuery->window.ekey = lastKey; - pQuery->lastKey = pQuery->window.skey; - - return getNeighborPoints(pQInfo, pMeterObj, pPointInterpSupporter); -#endif - - return true; -} - static void setScanLimitationByResultBuffer(SQuery *pQuery) { if (isTopBottomQuery(pQuery)) { pQuery->checkBuffer = 0; @@ -2327,7 +2262,7 @@ static void getNextTimeWindow(SQuery *pQuery, STimeWindow *pTimeWindow) { pTimeWindow->ekey = pTimeWindow->skey + (pQuery->intervalTime - 1); } -SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo, SDataStatis **pStatis) { +SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis) { SQuery *pQuery = pRuntimeEnv->pQuery; uint32_t r = 0; @@ -2351,16 +2286,16 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBl qTrace("QInfo:%p data block ignored, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); } else if (r == BLK_DATA_FILEDS_NEEDED) { - if 
(tsdbRetrieveDataBlockStatisInfo(pRuntimeEnv->pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { + if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { // return DISK_DATA_LOAD_FAILED; } if (*pStatis == NULL) { - pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); + pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); } } else { assert(r == BLK_DATA_ALL_NEEDED); - if (tsdbRetrieveDataBlockStatisInfo(pRuntimeEnv->pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { + if (tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis) != TSDB_CODE_SUCCESS) { // return DISK_DATA_LOAD_FAILED; } @@ -2376,7 +2311,7 @@ SArray *loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBl // return DISK_DATA_DISCARDED; } - pDataBlock = tsdbRetrieveDataBlock(pRuntimeEnv->pQueryHandle, NULL); + pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); } return pDataBlock; @@ -2447,8 +2382,11 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) { static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; + STableQueryInfo* pTableQueryInfo = pQuery->current; + qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", lastkey:%" PRId64 ", order:%d", - GET_QINFO_ADDR(pRuntimeEnv), pQuery->window.skey, pQuery->window.ekey, pQuery->lastKey, pQuery->order.order); + GET_QINFO_ADDR(pRuntimeEnv), pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey, pTableQueryInfo->lastKey, + pQuery->order.order); TsdbQueryHandleT pQueryHandle = IS_MASTER_SCAN(pRuntimeEnv)? pRuntimeEnv->pQueryHandle : pRuntimeEnv->pSecQueryHandle; while (tsdbNextDataBlock(pQueryHandle)) { @@ -2479,7 +2417,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } } - // in case of prj/diff query, ensure the output buffer is sufficient to accomodate the results of current block + // in case of prj/diff query, ensure the output buffer is sufficient to accommodate the results of current block if (!isIntervalQuery(pQuery) && !isGroupbyNormalCol(pQuery->pGroupbyExpr) && !isFixedOutputQuery(pQuery)) { SResultRec *pRec = &pQuery->rec; @@ -2505,11 +2443,10 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } SDataStatis *pStatis = NULL; - SArray * pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, &blockInfo, &pStatis); + SArray * pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : blockInfo.rows - 1; - int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, - &pRuntimeEnv->windowResInfo, pDataBlock); + int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock); qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, res:%d", GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes); @@ -2520,7 +2457,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { } } - // if the result buffer is not full, set the query completed flag + // if the result buffer is not full, set the query complete if (!Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { setQueryStatus(pQuery, QUERY_COMPLETED); } @@ -2530,8 +2467,8 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? 
QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP; closeAllTimeWindow(&pRuntimeEnv->windowResInfo); - removeRedundantWindow(&pRuntimeEnv->windowResInfo, pQuery->lastKey - step, step); - pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; + removeRedundantWindow(&pRuntimeEnv->windowResInfo, pTableQueryInfo->lastKey - step, step); + pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window } else { assert(Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)); } @@ -2540,8 +2477,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { return 0; } -static void updatelastkey(SQuery *pQuery, STableQueryInfo *pTableQInfo) { pTableQInfo->lastKey = pQuery->lastKey; } - /* * set tag value in SQLFunctionCtx * e.g.,tag information into input buffer @@ -2554,17 +2489,17 @@ static void doSetTagValueInParam(void *tsdb, STableId* pTableId, int32_t tagColI int16_t type = 0; if (tagColId == TSDB_TBNAME_COLUMN_INDEX) { - tsdbGetTableName(tsdb, pTableId, &val); - bytes = strnlen(val, TSDB_TABLE_NAME_LEN); + val = tsdbGetTableName(tsdb, pTableId, &bytes); type = TSDB_DATA_TYPE_BINARY; + tVariantCreateFromBinary(param, varDataVal(val), varDataLen(val), type); } else { tsdbGetTableTagVal(tsdb, pTableId, tagColId, &type, &bytes, &val); - } - - tVariantCreateFromBinary(param, val, bytes, type); - - if (tagColId == TSDB_TBNAME_COLUMN_INDEX) { - tfree(val); + + if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { + tVariantCreateFromBinary(param, varDataVal(val), varDataLen(val), type); + } else { + tVariantCreateFromBinary(param, val, bytes, type); + } } } @@ -3082,11 +3017,6 @@ void resetMergeResultBuf(SQuery *pQuery, SQLFunctionCtx *pCtx, SResultInfo *pRes } } -void setTableDataInfo(STableQueryInfo *pTableQueryInfo, int32_t tableIndex, int32_t groupId) { - pTableQueryInfo->groupIdx = groupId; - pTableQueryInfo->tableIndex = tableIndex; -} - static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *pTableQueryInfo) { if (pTableQueryInfo == NULL) { return; @@ -3132,19 +3062,6 @@ static void disableFuncInReverseScanImpl(SQInfo* pQInfo, SWindowResInfo *pWindow } } } - - int32_t numOfGroups = taosArrayGetSize(pQInfo->groupInfo.pGroupList); - - for(int32_t i = 0; i < numOfGroups; ++i) { - SArray *group = taosArrayGetP(pQInfo->groupInfo.pGroupList, i); - qTrace("QInfo:%p no result in group %d, continue", pQInfo, pQInfo->groupIndex - 1); - - size_t t = taosArrayGetSize(group); - for (int32_t j = 0; j < t; ++j) { - SGroupItem *item = taosArrayGet(group, j); - updateTableQueryInfoForReverseScan(pQuery, item->info); - } - } } void disableFuncInReverseScan(SQInfo *pQInfo) { @@ -3170,6 +3087,18 @@ void disableFuncInReverseScan(SQInfo *pQInfo) { } } } + + int32_t numOfGroups = taosArrayGetSize(pQInfo->groupInfo.pGroupList); + + for(int32_t i = 0; i < numOfGroups; ++i) { + SArray *group = taosArrayGetP(pQInfo->groupInfo.pGroupList, i); + + size_t t = taosArrayGetSize(group); + for (int32_t j = 0; j < t; ++j) { + SGroupItem *item = taosArrayGet(group, j); + updateTableQueryInfoForReverseScan(pQuery, item->info); + } + } } void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) { @@ -3345,13 +3274,14 @@ bool needScanDataBlocksAgain(SQueryRuntimeEnv *pRuntimeEnv) { static SQueryStatusInfo getQueryStatusInfo(SQueryRuntimeEnv *pRuntimeEnv) { SQuery *pQuery = pRuntimeEnv->pQuery; - + STableQueryInfo* pTableQueryInfo = pQuery->current; + SQueryStatusInfo info = { - .status = pQuery->status, + .status = pQuery->status, 
.windowIndex = pRuntimeEnv->windowResInfo.curIndex, - .lastKey = pQuery->lastKey, - .w = pQuery->window, - .curWindow = {.skey = pQuery->lastKey, .ekey = pQuery->window.ekey}, + .lastKey = pTableQueryInfo->lastKey, + .w = pQuery->window, + .curWindow = {.skey = pTableQueryInfo->lastKey, .ekey = pTableQueryInfo->win.ekey}, }; return info; @@ -3376,7 +3306,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI STsdbQueryCond cond = { .twindow = pQuery->window, - .order = pQuery->order.order, + .order = pQuery->order.order, .colList = pQuery->colList, .numOfCols = pQuery->numOfCols, }; @@ -3395,6 +3325,7 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusInfo *pStatus) { SQuery *pQuery = pRuntimeEnv->pQuery; + STableQueryInfo* pTableQueryInfo = pQuery->current; SWITCH_ORDER(pQuery->order.order); switchCtxOrder(pRuntimeEnv); @@ -3408,17 +3339,19 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus // update the pQuery->window.skey and pQuery->window.ekey to limit the scan scope of sliding query // during reverse scan - pQuery->lastKey = pStatus->lastKey; + pTableQueryInfo->lastKey = pStatus->lastKey; pQuery->status = pStatus->status; - pQuery->window = pStatus->w; + pTableQueryInfo->win = pStatus->w; } void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { + SQInfo *pQInfo = (SQInfo *) GET_QINFO_ADDR(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; + STableQueryInfo *pTableQueryInfo = pQuery->current; + setQueryStatus(pQuery, QUERY_NOT_COMPLETED); // store the start query position - SQInfo * pQInfo = (SQInfo *)GET_QINFO_ADDR(pRuntimeEnv); SQueryStatusInfo qstatus = getQueryStatusInfo(pRuntimeEnv); SET_MASTER_SCAN_FLAG(pRuntimeEnv); @@ -3429,7 +3362,7 @@ void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { if (pRuntimeEnv->scanFlag == MASTER_SCAN) { qstatus.status = pQuery->status; - qstatus.curWindow.ekey = pQuery->lastKey - step; + qstatus.curWindow.ekey = pTableQueryInfo->lastKey - step; } if (!needScanDataBlocksAgain(pRuntimeEnv)) { @@ -3443,7 +3376,7 @@ void scanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { STsdbQueryCond cond = { .twindow = qstatus.curWindow, - .order = pQuery->order.order, + .order = pQuery->order.order, .colList = pQuery->colList, .numOfCols = pQuery->numOfCols, }; @@ -3549,12 +3482,10 @@ void destroyTableQueryInfo(STableQueryInfo *pTableQueryInfo, int32_t numOfCols) void restoreIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *pTableQueryInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - - pQuery->window = pTableQueryInfo->win; - pQuery->lastKey = pTableQueryInfo->lastKey; - - assert(((pQuery->lastKey >= pQuery->window.skey) && QUERY_IS_ASC_QUERY(pQuery)) || - ((pQuery->lastKey <= pQuery->window.skey) && !QUERY_IS_ASC_QUERY(pQuery))); + pQuery->current = pTableQueryInfo; + + assert(((pTableQueryInfo->lastKey >= pTableQueryInfo->win.skey) && QUERY_IS_ASC_QUERY(pQuery)) || + ((pTableQueryInfo->lastKey <= pTableQueryInfo->win.skey) && !QUERY_IS_ASC_QUERY(pQuery))); } /** @@ -3562,8 +3493,10 @@ void restoreIntervalQueryRange(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo *p * @param pRuntimeEnv * @param pDataBlockInfo */ -void setExecutionContext(SQInfo *pQInfo, STableQueryInfo *pTableQueryInfo, STableId* pTableId, int32_t groupIdx, TSKEY nextKey) { +void setExecutionContext(SQInfo *pQInfo, STableId* pTableId, int32_t groupIdx, TSKEY nextKey) { SQueryRuntimeEnv *pRuntimeEnv = 
&pQInfo->runtimeEnv; + STableQueryInfo *pTableQueryInfo = pRuntimeEnv->pQuery->current; + SWindowResInfo * pWindowResInfo = &pRuntimeEnv->windowResInfo; int32_t GROUPRESULTID = 1; @@ -3647,12 +3580,12 @@ int32_t setAdditionalInfo(SQInfo *pQInfo, STableId* pTableId, STableQueryInfo *p * merged during merge stage. In this case, we need the pTableQueryInfo->lastResRows to decide if there * is a previous result generated or not. */ -void setIntervalQueryRange(STableQueryInfo *pTableQueryInfo, SQInfo *pQInfo, TSKEY key) { +void setIntervalQueryRange(SQInfo *pQInfo, TSKEY key) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; SQuery * pQuery = pRuntimeEnv->pQuery; - + STableQueryInfo *pTableQueryInfo = pQuery->current; + if (pTableQueryInfo->queryRangeSet) { - pQuery->lastKey = key; pTableQueryInfo->lastKey = key; } else { pQuery->window.skey = key; @@ -3689,8 +3622,6 @@ void setIntervalQueryRange(STableQueryInfo *pTableQueryInfo, SQInfo *pQInfo, TSK pTableQueryInfo->queryRangeSet = 1; pTableQueryInfo->lastKey = pQuery->window.skey; pTableQueryInfo->win.skey = pQuery->window.skey; - - pQuery->lastKey = pQuery->window.skey; } } @@ -3710,7 +3641,9 @@ bool needPrimaryTimestampCol(SQuery *pQuery, SDataBlockInfo *pDataBlockInfo) { * 2. if there are top/bottom, first_dst/last_dst functions, we need to load timestamp column in any cases; */ STimeWindow *w = &pDataBlockInfo->window; - bool loadPrimaryTS = (pQuery->lastKey >= w->skey && pQuery->lastKey <= w->ekey) || + STableQueryInfo* pTableQueryInfo = pQuery->current; + + bool loadPrimaryTS = (pTableQueryInfo->lastKey >= w->skey && pTableQueryInfo->lastKey <= w->ekey) || (pQuery->window.ekey >= w->skey && pQuery->window.ekey <= w->ekey) || requireTimestamp(pQuery); return loadPrimaryTS; @@ -3847,7 +3780,6 @@ void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, STableQueryInfo } updateWindowResNumOfRes(pRuntimeEnv, pTableQueryInfo); - updatelastkey(pQuery, pTableQueryInfo); } bool vnodeHasRemainResults(void *handle) { @@ -4041,10 +3973,12 @@ void vnodePrintQueryStatistics(SQInfo *pQInfo) { static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBlockInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; + STableQueryInfo* pTableQueryInfo = pQuery->current; + int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); if (pQuery->limit.offset == pBlockInfo->rows) { // current block will ignore completed - pQuery->lastKey = QUERY_IS_ASC_QUERY(pQuery) ? pBlockInfo->window.ekey + step : pBlockInfo->window.skey + step; + pTableQueryInfo->lastKey = QUERY_IS_ASC_QUERY(pQuery) ? 
pBlockInfo->window.ekey + step : pBlockInfo->window.skey + step; pQuery->limit.offset = 0; return; } @@ -4064,11 +3998,10 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBloc TSKEY *keys = (TSKEY *)pColInfoData->pData; // update the offset value - pQuery->lastKey = keys[pQuery->pos]; + pTableQueryInfo->lastKey = keys[pQuery->pos]; pQuery->limit.offset = 0; - int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, - &pRuntimeEnv->windowResInfo, pDataBlock); + int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, res:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes); @@ -4084,6 +4017,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { pQuery->pos = 0; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); + STableQueryInfo* pTableQueryInfo = pQuery->current; TsdbQueryHandleT pQueryHandle = pRuntimeEnv->pQueryHandle; while (tsdbNextDataBlock(pQueryHandle)) { @@ -4095,8 +4029,8 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { if (pQuery->limit.offset > blockInfo.rows) { pQuery->limit.offset -= blockInfo.rows; - pQuery->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.window.ekey : blockInfo.window.skey; - pQuery->lastKey += step; + pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.window.ekey : blockInfo.window.skey; + pTableQueryInfo->lastKey += step; qTrace("QInfo:%p skip rows:%d, offset:%" PRId64 "", GET_QINFO_ADDR(pRuntimeEnv), blockInfo.rows, pQuery->limit.offset); @@ -4125,6 +4059,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv) { TSKEY skey1, ekey1; STimeWindow w = {0}; SWindowResInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; + STableQueryInfo *pTableQueryInfo = pQuery->current; while (tsdbNextDataBlock(pRuntimeEnv->pQueryHandle)) { SDataBlockInfo blockInfo = tsdbRetrieveDataBlockInfo(pRuntimeEnv->pQueryHandle); @@ -4170,11 +4105,10 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv) { // set the abort info pQuery->pos = startPos; - pQuery->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; + pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; pWindowResInfo->prevSKey = tw.skey; - int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, - &pRuntimeEnv->windowResInfo, pDataBlock); + int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock); qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, res:%d", GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes); @@ -4199,7 +4133,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv) { // set the abort info pQuery->pos = startPos; - pQuery->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; + pTableQueryInfo->lastKey = ((TSKEY *)pColInfoData->pData)[startPos]; pWindowResInfo->prevSKey = tw.skey; win = tw; } else { @@ -4220,12 +4154,9 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool setScanLimitationByResultBuffer(pQuery); changeExecuteScanOrder(pQuery, false); - // dataInCache requires lastKey value - pQuery->lastKey = pQuery->window.skey; - STsdbQueryCond cond = { .twindow = pQuery->window, - .order = pQuery->order.order, + .order = pQuery->order.order, .colList = pQuery->colList, .numOfCols = 
pQuery->numOfCols, }; @@ -4238,6 +4169,9 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool } else if (!isSTableQuery || isIntervalQuery(pQuery) || isFixedOutputQuery(pQuery)) { pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQInfo->tableIdGroupInfo); } + + // create the table query support structures + createTableQueryInfo(pQInfo); } pQInfo->tsdb = tsdb; @@ -4331,7 +4265,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool // } // the pQuery->window.skey is changed during normalizedFirstQueryRange, so set the newest lastkey value - pQuery->lastKey = pQuery->window.skey; return TSDB_CODE_SUCCESS; } @@ -4399,18 +4332,19 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) { break; } } - + assert(pTableQueryInfo != NULL); restoreIntervalQueryRange(pRuntimeEnv, pTableQueryInfo); SDataStatis *pStatis = NULL; - SArray * pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, &blockInfo, &pStatis); + + SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis); TSKEY nextKey = blockInfo.window.skey; if (!isIntervalQuery(pQuery)) { - setExecutionContext(pQInfo, pTableQueryInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIdx, nextKey); + setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIdx, nextKey); } else { // interval query - setIntervalQueryRange(pTableQueryInfo, pQInfo, nextKey); + setIntervalQueryRange(pQInfo, nextKey); int32_t ret = setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo); if (ret != TSDB_CODE_SUCCESS) { @@ -4446,6 +4380,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { .numOfCols = pQuery->numOfCols, }; + // todo refactor SArray *g1 = taosArrayInit(1, POINTER_BYTES); SArray *tx = taosArrayInit(1, sizeof(STableId)); @@ -4564,6 +4499,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) { initCtxOutputBuf(pRuntimeEnv); setTagVal(pRuntimeEnv, (STableId*) taosArrayGet(tx, 0), pQInfo->tsdb); + + // here we simply set the first table as current table + pRuntimeEnv->pQuery->current = ((SGroupItem*) taosArrayGet(group, 0))->info; scanAllDataBlocks(pRuntimeEnv); int64_t numOfRes = getNumOfResult(pRuntimeEnv); @@ -4580,8 +4518,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } } } else { - createTableQueryInfo(pQInfo); - /* * 1. super table projection query, 2. group-by on normal columns query, 3. ts-comp query * if the subgroup index is larger than 0, results generated by group by tbname,k is existed. @@ -4614,12 +4550,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } SGroupItem *item = taosArrayGet(group, pQInfo->tableIndex); + pQuery->current = item->info; - STableQueryInfo *pInfo = item->info; - if (pInfo->lastKey > 0) { - pQuery->window.skey = pInfo->lastKey; - } - if (!multiTableMultioutputHelper(pQInfo, pQInfo->tableIndex)) { pQInfo->tableIndex++; continue; @@ -4627,7 +4559,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { // SPointInterpoSupporter pointInterpSupporter = {0}; - // TODO handle the limit problem + // TODO handle the limit offset problem if (pQuery->numOfFilterCols == 0 && pQuery->limit.offset > 0) { // skipBlocks(pRuntimeEnv); @@ -4659,8 +4591,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) { * to ensure that, we can reset the query range once query on a meter is completed. 
*/ pQInfo->tableIndex++; - pInfo->lastKey = pQuery->lastKey; - // if the buffer is full or group by each table, we need to jump out of the loop if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL) /*|| isGroupbyEachTable(pQuery->pGroupbyExpr, pSupporter->pSidSet)*/) { @@ -4668,7 +4598,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } } else { // forward query range - pQuery->window.skey = pQuery->lastKey; + pQuery->window.skey = pQuery->current->lastKey; // all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter if (pQuery->rec.rows == 0) { @@ -4853,12 +4783,9 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { qTrace("QInfo:%p query start, qrange:%" PRId64 "-%" PRId64 ", order:%d, forward scan start", pQInfo, pQuery->window.skey, pQuery->window.ekey, pQuery->order.order); - // create the query support structures - createTableQueryInfo(pQInfo); - // do check all qualified data blocks int64_t el = queryOnDataBlocks(pQInfo); - qTrace("QInfo:%p forward scan completed, elapsed time: %lldms, reversed scan start", pQInfo, el); + qTrace("QInfo:%p master scan completed, elapsed time: %lldms, reverse scan start", pQInfo, el); // query error occurred or query is killed, abort current execution if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { @@ -4909,10 +4836,12 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { * select count(*)/top(field,k)/avg(field name) from table_name [where ts>now-1a]; * select count(*) from table_name group by status_column; */ -static void tableFixedOutputProcess(SQInfo *pQInfo) { +static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery * pQuery = pRuntimeEnv->pQuery; - + + SQuery *pQuery = pRuntimeEnv->pQuery; + pQuery->current = pTableInfo; // set current query table info + scanAllDataBlocks(pRuntimeEnv); finalizeQueryResult(pRuntimeEnv); @@ -4932,10 +4861,12 @@ static void tableFixedOutputProcess(SQInfo *pQInfo) { limitResults(pQInfo); } -static void tableMultiOutputProcess(SQInfo *pQInfo) { +static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery * pQuery = pRuntimeEnv->pQuery; - + + SQuery *pQuery = pRuntimeEnv->pQuery; + pQuery->current = pTableInfo; + // for ts_comp query, re-initialized is not allowed if (!isTSCompQuery(pQuery)) { resetCtxOutputBuf(pRuntimeEnv); @@ -4970,15 +4901,15 @@ static void tableMultiOutputProcess(SQInfo *pQInfo) { } qTrace("QInfo:%p vid:%d sid:%d id:%s, skip current result, offset:%" PRId64 ", next qrange:%" PRId64 "-%" PRId64, - pQInfo, pQuery->limit.offset, pQuery->lastKey); + pQInfo, pQuery->limit.offset, pQuery->current->lastKey); resetCtxOutputBuf(pRuntimeEnv); } limitResults(pQInfo); if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) { - qTrace("QInfo:%p query paused due to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, pQuery->lastKey, - pQuery->window.ekey); + qTrace("QInfo:%p query paused due to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo, + pQuery->current->lastKey, pQuery->window.ekey); } if (!isTSCompQuery(pQuery)) { @@ -5018,11 +4949,12 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv) { } // handle time interval query on table -static void tableIntervalProcess(SQInfo *pQInfo) { +static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &(pQInfo->runtimeEnv); 
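The hunks above drop the query-wide pQuery->lastKey and instead thread a per-table STableQueryInfo through the table processors, with pQuery->current naming the table being scanned. A minimal sketch of that pattern, using simplified stand-in types rather than the real TDengine structs:

#include <stdint.h>

typedef struct DemoTableState {
  int64_t lastKey;         /* per-table resume point, replaces a query-wide lastKey */
} DemoTableState;

typedef struct DemoQuery {
  DemoTableState *current; /* table currently being scanned */
} DemoQuery;

/* Each processor pins 'current' before scanning so that shared helpers
 * read and advance the resume key of this table only. */
static void demoProcessTable(DemoQuery *q, DemoTableState *tbl,
                             int64_t blockEndKey, int step) {
  q->current = tbl;
  /* ... scan data blocks ... */
  q->current->lastKey = blockEndKey + step; /* where the next scan of this table resumes */
}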
- SQuery * pQuery = pRuntimeEnv->pQuery; int32_t numOfInterpo = 0; + SQuery *pQuery = pRuntimeEnv->pQuery; + pQuery->current = pTableInfo; // skip blocks without load the actual data block from file if no filter condition present skipTimeInterval(pRuntimeEnv); @@ -5131,15 +5063,19 @@ static void tableQueryImpl(SQInfo *pQInfo) { // number of points returned during this query pQuery->rec.rows = 0; int64_t st = taosGetTimestampUs(); - + + assert(pQInfo->groupInfo.numOfTables == 1); + SArray* g = taosArrayGetP(pQInfo->groupInfo.pGroupList, 0); + SGroupItem* item = taosArrayGet(g, 0); + // group by normal column, sliding window query, interval query are handled by interval query processor if (isIntervalQuery(pQuery) || isGroupbyNormalCol(pQuery->pGroupbyExpr)) { // interval (down sampling operation) - tableIntervalProcess(pQInfo); + tableIntervalProcess(pQInfo, item->info); } else if (isFixedOutputQuery(pQuery)) { - tableFixedOutputProcess(pQInfo); + tableFixedOutputProcess(pQInfo, item->info); } else { // diff/add/multiply/subtract/division assert(pQuery->checkBuffer == 1); - tableMultiOutputProcess(pQInfo); + tableMultiOutputProcess(pQInfo, item->info); } // record the total elapsed time @@ -5833,7 +5769,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQuery->pos = -1; pQuery->window = pQueryMsg->window; - pQuery->lastKey = pQuery->window.skey; if (sem_init(&pQInfo->dataReady, 0, 0) != 0) { qError("QInfo:%p init dataReady sem failed, reason:%s", pQInfo, strerror(errno)); @@ -5910,9 +5845,7 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ if ((code = doInitQInfo(pQInfo, pTSBuf, tsdb, vgId, isSTable)) != TSDB_CODE_SUCCESS) { goto _error; } - - // qTrace("QInfo:%p set query flag and prepare runtime environment completed, ref:%d, wait for schedule", pQInfo, - // pQInfo->refCount); + return code; _error: @@ -6085,34 +6018,35 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi if (pQueryMsg->numOfTables <= 0) { qError("Invalid number of tables to query, numOfTables:%d", pQueryMsg->numOfTables); code = TSDB_CODE_INVALID_QUERY_MSG; - goto _query_over; + goto _over; } if (pTableIdList == NULL || taosArrayGetSize(pTableIdList) == 0) { qError("qmsg:%p, SQueryTableMsg wrong format", pQueryMsg); code = TSDB_CODE_INVALID_QUERY_MSG; - goto _query_over; + goto _over; } SExprInfo *pExprs = NULL; if ((code = createSqlFunctionExprFromMsg(pQueryMsg, &pExprs, pExprMsg, pTagColumnInfo)) != TSDB_CODE_SUCCESS) { - goto _query_over; + goto _over; } SSqlGroupbyExpr *pGroupbyExpr = createGroupbyExprFromMsg(pQueryMsg, pGroupColIndex, &code); if ((pGroupbyExpr == NULL && pQueryMsg->numOfGroupCols != 0) || code != TSDB_CODE_SUCCESS) { - goto _query_over; + goto _over; } bool isSTableQuery = false; STableGroupInfo groupInfo = {0}; + //todo multitable_query?? 
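The qCreateQueryInfo hunks here and just below route every failure through a single _over cleanup label (and add the previously missing bail-out when createQInfoImpl returns NULL). A small, self-contained sketch of that single-exit style, with hypothetical names:

#include <stdlib.h>

static int demoCreateQuery(void **result) {
  void *exprs = NULL, *groupby = NULL;
  int   code  = 0;
  *result = NULL;

  exprs = malloc(128);
  if (exprs == NULL) { code = -1; goto _over; }

  groupby = malloc(64);
  if (groupby == NULL) { code = -1; goto _over; } /* exprs is still freed below */

  *result = groupby;   /* hand ownership to the caller on success */
  groupby = NULL;

_over:
  free(groupby);       /* free(NULL) is a no-op, so partial failures are safe */
  free(exprs);
  return code;
}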
if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY|TSDB_QUERY_TYPE_TABLE_QUERY)) { isSTableQuery = TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_MULTITABLE_QUERY); STableId *id = taosArrayGet(pTableIdList, 0); if ((code = tsdbGetOneTableGroup(tsdb, id->uid, &groupInfo)) != TSDB_CODE_SUCCESS) { - goto _query_over; + goto _over; } } else if (TSDB_QUERY_HAS_TYPE(pQueryMsg->queryType, TSDB_QUERY_TYPE_STABLE_QUERY)) { isSTableQuery = true; @@ -6129,7 +6063,7 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi numOfGroupByCols); if (groupInfo.numOfTables == 0) { // no qualified tables no need to do query code = TSDB_CODE_SUCCESS; - goto _query_over; + goto _over; } } else { assert(0); @@ -6138,11 +6072,12 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi (*pQInfo) = createQInfoImpl(pQueryMsg, pGroupbyExpr, pExprs, &groupInfo, pTagColumnInfo); if ((*pQInfo) == NULL) { code = TSDB_CODE_SERV_OUT_OF_MEMORY; + goto _over; } code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery); -_query_over: +_over: tfree(tagCond); tfree(tbnameCond); taosArrayDestroy(pTableIdList); @@ -6318,12 +6253,10 @@ static void buildTagQueryResult(SQInfo* pQInfo) { for(int32_t j = 0; j < pQuery->numOfOutput; ++j) { // todo check the return value, refactor codes if (pExprInfo[j].base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { - tsdbGetTableName(pQInfo->tsdb, &item->id, &data); + data = tsdbGetTableName(pQInfo->tsdb, &item->id, &bytes); char* dst = pQuery->sdata[j]->data + i * (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE); - STR_WITH_MAXSIZE_TO_VARSTR(dst, data, TSDB_TABLE_NAME_LEN); - tfree(data); - + memcpy(dst, data, varDataTLen(data)); } else {// todo refactor, return the true length of binary|nchar data tsdbGetTableTagVal(pQInfo->tsdb, &item->id, pExprInfo[j].base.colInfo.colId, &type, &bytes, &data); assert(bytes == pExprInfo[j].bytes && type == pExprInfo[j].type); diff --git a/src/query/src/tvariant.c b/src/query/src/tvariant.c index 0c4a2b67578c8f59643941d5f5ac3caaceebce54..51d3286722e4817a9c034d000fce9ccbe102353a 100644 --- a/src/query/src/tvariant.c +++ b/src/query/src/tvariant.c @@ -389,6 +389,7 @@ static int32_t toBinary(tVariant *pVariant, char **pDest, int32_t *pDestSize) { return 0; } +// todo handle the error static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { char tmpBuf[40] = {0}; @@ -424,7 +425,12 @@ static int32_t toNchar(tVariant *pVariant, char **pDest, int32_t *pDestSize) { pVariant->wpz = (wchar_t *)tmp; } else { - taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, NULL); + size_t output = -1; + taosMbsToUcs4(pDst, nLen, *pDest, (nLen + 1) * TSDB_NCHAR_SIZE, &output); + + if (pDestSize != NULL) { + *pDestSize = output; + } } return 0; @@ -779,7 +785,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, char type) { } case TSDB_DATA_TYPE_NCHAR: { if (pVariant->nType == TSDB_DATA_TYPE_NULL) { - *(uint32_t *)payload = TSDB_DATA_NCHAR_NULL; + *(uint32_t *) payload = TSDB_DATA_NCHAR_NULL; } else { if (pVariant->nType != TSDB_DATA_TYPE_NCHAR) { toNchar(pVariant, &payload, &pVariant->nLen); diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index a2333566f194515efcc90980396262b23cf584a6..ca4b211be880e361f5433b932afbfa8553635abd 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -55,9 +55,8 @@ typedef struct { char secret[TSDB_KEY_LEN]; // secret for the link char ckey[TSDB_KEY_LEN]; // ciphering key - void (*cfp)(SRpcMsg *); + void 
(*cfp)(SRpcMsg *, SRpcIpSet *); int (*afp)(char *user, char *spi, char *encrypt, char *secret, char *ckey); - void (*ufp)(void *ahandle, SRpcIpSet *pIpSet); void *idPool; // handle to ID pool void *tmrCtrl; // handle to timer @@ -222,7 +221,6 @@ void *rpcOpen(const SRpcInit *pInit) { if (pInit->secret) strcpy(pRpc->secret, pInit->secret); if (pInit->ckey) strcpy(pRpc->ckey, pInit->ckey); pRpc->spi = pInit->spi; - pRpc->ufp = pInit->ufp; pRpc->cfp = pInit->cfp; pRpc->afp = pInit->afp; @@ -900,10 +898,11 @@ static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) { memcpy(pContext->pRsp, pMsg, sizeof(SRpcMsg)); } else { // for asynchronous API - if (pRpc->ufp && (pContext->ipSet.inUse != pContext->oldInUse || pContext->redirect)) - (*pRpc->ufp)(pContext->ahandle, &pContext->ipSet); // notify the update of ipSet + SRpcIpSet *pIpSet = NULL; + if (pContext->ipSet.inUse != pContext->oldInUse || pContext->redirect) + pIpSet = &pContext->ipSet; - (*pRpc->cfp)(pMsg); + (*pRpc->cfp)(pMsg, pIpSet); } // free the request message @@ -924,7 +923,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) { if ( rpcIsReq(pHead->msgType) ) { rpcMsg.handle = pConn; taosTmrReset(rpcProcessProgressTimer, tsRpcTimer/2, pConn, pRpc->tmrCtrl, &pConn->pTimer); - (*(pRpc->cfp))(&rpcMsg); + (*(pRpc->cfp))(&rpcMsg, NULL); } else { // it's a response SRpcReqContext *pContext = pConn->pContext; diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 2aa1f0e4e94c0b944b7ac0553c1188fd482c7111..ea1ebb5974691f9bd6a1244e6ad06de464d2b307 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -31,22 +31,16 @@ typedef struct { void *pRpc; } SInfo; -static void processResponse(SRpcMsg *pMsg) { +static void processResponse(SRpcMsg *pMsg, SRpcIpSet *pIpSet) { SInfo *pInfo = (SInfo *)pMsg->handle; tTrace("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code); - rpcFreeCont(pMsg->pCont); + if (pIpSet) pInfo->ipSet = *pIpSet; + rpcFreeCont(pMsg->pCont); sem_post(&pInfo->rspSem); } -static void processUpdateIpSet(void *handle, SRpcIpSet *pIpSet) { - SInfo *pInfo = (SInfo *)handle; - - tTrace("thread:%d, ip set is changed, index:%d", pInfo->index, pIpSet->inUse); - pInfo->ipSet = *pIpSet; -} - static int tcount = 0; static void *sendRequest(void *param) { @@ -99,7 +93,6 @@ int main(int argc, char *argv[]) { rpcInit.label = "APP"; rpcInit.numOfThreads = 1; rpcInit.cfp = processResponse; - rpcInit.ufp = processUpdateIpSet; rpcInit.sessions = 100; rpcInit.idleTime = tsShellActivityTimer*1000; rpcInit.user = "michael"; diff --git a/src/rpc/test/rsclient.c b/src/rpc/test/rsclient.c index 683cbb590a5d198cd9d1c220c1a4fd6b544aa1fc..3b19d7a9ea5561e3641fbbbf2deae99c5794df87 100644 --- a/src/rpc/test/rsclient.c +++ b/src/rpc/test/rsclient.c @@ -32,12 +32,6 @@ typedef struct { void *pRpc; } SInfo; -static void processUpdateIpSet(void *handle, SRpcIpSet *pIpSet) { - SInfo *pInfo = (SInfo *)handle; - - tTrace("thread:%d, ip set is changed, index:%d", pInfo->index, pIpSet->inUse); - pInfo->ipSet = *pIpSet; -} static int tcount = 0; static int terror = 0; @@ -100,8 +94,6 @@ int main(int argc, char *argv[]) { rpcInit.localPort = 0; rpcInit.label = "APP"; rpcInit.numOfThreads = 1; - // rpcInit.cfp = processResponse; - rpcInit.ufp = processUpdateIpSet; rpcInit.sessions = 100; rpcInit.idleTime = tsShellActivityTimer*1000; rpcInit.user = "michael"; diff --git a/src/rpc/test/rserver.c b/src/rpc/test/rserver.c index 
9f781ef276dc2788ae5bf7470431465649d7e542..958d099027f2072b82aee45fe302f0042c1fd8aa 100644 --- a/src/rpc/test/rserver.c +++ b/src/rpc/test/rserver.c @@ -113,7 +113,7 @@ int retrieveAuthInfo(char *meterId, char *spi, char *encrypt, char *secret, char return ret; } -void processRequestMsg(SRpcMsg *pMsg) { +void processRequestMsg(SRpcMsg *pMsg, SRpcIpSet *pIpSet) { SRpcMsg *pTemp; pTemp = taosAllocateQitem(sizeof(SRpcMsg)); diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h index f179ef6ef98aff08f4829bba5a6ad9edc1e49923..57dd4b9630ccbc84b75d6facb7c1d9ed448d880c 100644 --- a/src/tsdb/inc/tsdbMain.h +++ b/src/tsdb/inc/tsdbMain.h @@ -81,7 +81,6 @@ typedef struct { // ---------- TSDB TABLE DEFINITION typedef struct STable { int8_t type; - char * name; STableId tableId; int64_t superUid; // Super table UID int32_t sversion; @@ -96,9 +95,10 @@ typedef struct STable { TSKEY lastKey; // lastkey inserted in this table, initialized as 0, TODO: make a structure struct STable *next; // TODO: remove the next struct STable *prev; + tstr * name; // NOTE: there a flexible string here } STable; -#define TSDB_GET_TABLE_LAST_KEY(pTable) ((pTable)->lastKey) +#define TSDB_GET_TABLE_LAST_KEY(tb) ((tb)->lastKey) void * tsdbEncodeTable(STable *pTable, int *contLen); STable *tsdbDecodeTable(void *cont, int contLen); @@ -121,6 +121,12 @@ typedef struct { int maxCols; } STsdbMeta; +// element put in skiplist for each table +typedef struct STableIndexElem { + STsdbMeta* pMeta; + STable* pTable; +} STableIndexElem; + STsdbMeta *tsdbInitMeta(char *rootDir, int32_t maxTables); int32_t tsdbFreeMeta(STsdbMeta *pMeta); STSchema * tsdbGetTableSchema(STsdbMeta *pMeta, STable *pTable); @@ -150,7 +156,7 @@ int32_t tsdbDropTableImpl(STsdbMeta *pMeta, STableId tableId); STable *tsdbIsValidTableToInsert(STsdbMeta *pMeta, STableId tableId); // int32_t tsdbInsertRowToTableImpl(SSkipListNode *pNode, STable *pTable); STable *tsdbGetTableByUid(STsdbMeta *pMeta, int64_t uid); -char * getTupleKey(const void *data); +char *getTSTupleKey(const void * data); typedef struct { int blockId; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 59e17edd5e17e6ced3f97711580b58e605ecee42..4ab0149b6061d321279e00fdd93e1bc943bf7826 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -809,7 +809,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable if (pTable->mem == NULL) { pTable->mem = (SMemTable *)calloc(1, sizeof(SMemTable)); if (pTable->mem == NULL) return -1; - pTable->mem->pData = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], 0, 0, 0, getTupleKey); + pTable->mem->pData = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], 0, 0, 0, getTSTupleKey); pTable->mem->keyFirst = INT64_MAX; pTable->mem->keyLast = 0; } @@ -832,7 +832,7 @@ static int32_t tdInsertRowToTable(STsdbRepo *pRepo, SDataRow row, STable *pTable if (pTable->mem == NULL) { pTable->mem = (SMemTable *)calloc(1, sizeof(SMemTable)); if (pTable->mem == NULL) return -1; - pTable->mem->pData = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], 0, 0, 0, getTupleKey); + pTable->mem->pData = tSkipListCreate(5, TSDB_DATA_TYPE_TIMESTAMP, TYPE_BYTES[TSDB_DATA_TYPE_TIMESTAMP], 0, 0, 0, getTSTupleKey); pTable->mem->keyFirst = INT64_MAX; pTable->mem->keyLast = 0; } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 0828db7f00d2e869abf2aef3fe02c926ff77be4f..c7e260ae50f8c5b084b6553c6ac152b9848d083e 100644 --- 
a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -1,6 +1,4 @@ #include - -// #include "taosdef.h" #include "tskiplist.h" #include "tsdb.h" #include "taosdef.h" @@ -10,6 +8,8 @@ #define TSDB_SUPER_TABLE_SL_LEVEL 5 // TODO: may change here #define TSDB_META_FILE_NAME "META" +const int32_t DEFAULT_TAG_INDEX_COLUMN = 0; // skip list built based on the first column of tags + static int tsdbFreeTable(STable *pTable); static int32_t tsdbCheckTableCfg(STableCfg *pCfg); static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx); @@ -39,11 +39,12 @@ void *tsdbEncodeTable(STable *pTable, int *contLen) { void *ptr = ret; T_APPEND_MEMBER(ptr, pTable, STable, type); - // Encode name - *(int *)ptr = strlen(pTable->name); + // Encode name, todo refactor + *(int *)ptr = varDataLen(pTable->name); ptr = (char *)ptr + sizeof(int); - memcpy(ptr, pTable->name, strlen(pTable->name)); - ptr = (char *)ptr + strlen(pTable->name); + memcpy(ptr, varDataVal(pTable->name), varDataLen(pTable->name)); + ptr = (char *)ptr + varDataLen(pTable->name); + T_APPEND_MEMBER(ptr, &(pTable->tableId), STableId, uid); T_APPEND_MEMBER(ptr, &(pTable->tableId), STableId, tid); T_APPEND_MEMBER(ptr, pTable, STable, superUid); @@ -79,9 +80,12 @@ STable *tsdbDecodeTable(void *cont, int contLen) { T_READ_MEMBER(ptr, int8_t, pTable->type); int len = *(int *)ptr; ptr = (char *)ptr + sizeof(int); - pTable->name = calloc(1, len + 1); + pTable->name = calloc(1, len + VARSTR_HEADER_SIZE); if (pTable->name == NULL) return NULL; - memcpy(pTable->name, ptr, len); + + varDataSetLen(pTable->name, len); + memcpy(pTable->name->data, ptr, len); + ptr = (char *)ptr + len; T_READ_MEMBER(ptr, int64_t, pTable->tableId.uid); T_READ_MEMBER(ptr, int32_t, pTable->tableId.tid); @@ -105,8 +109,13 @@ void tsdbFreeEncode(void *cont) { } static char* getTagIndexKey(const void* pData) { - STable* table = *(STable**) pData; - return getTupleKey(table->tagVal); + STableIndexElem* elem = (STableIndexElem*) pData; + + SDataRow row = elem->pTable->tagVal; + STSchema* pSchema = tsdbGetTableTagSchema(elem->pMeta, elem->pTable); + STColumn* pCol = &pSchema->columns[DEFAULT_TAG_INDEX_COLUMN]; + + return tdGetRowDataOfCol(row, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); } int tsdbRestoreTable(void *pHandle, void *cont, int contLen) { @@ -225,36 +234,28 @@ STSchema * tsdbGetTableTagSchema(STsdbMeta *pMeta, STable *pTable) { } } +// todo refactor table name definition int32_t tsdbGetTableTagVal(TsdbRepoT* repo, STableId* id, int32_t colId, int16_t* type, int16_t* bytes, char** val) { STsdbMeta* pMeta = tsdbGetMeta(repo); STable* pTable = tsdbGetTableByUid(pMeta, id->uid); STSchema* pSchema = tsdbGetTableTagSchema(pMeta, pTable); - STColumn* pCol = NULL; - int32_t offset = 0; + for(int32_t col = 0; col < schemaNCols(pSchema); ++col) { STColumn* p = schemaColAt(pSchema, col); if (p->colId == colId) { pCol = p; break; } - - if (p->type == TSDB_DATA_TYPE_BINARY || p->type == TSDB_DATA_TYPE_NCHAR) { - offset += sizeof(int32_t); - } else { - offset += p->bytes; - } } if (pCol == NULL) { return -1; // No matched tags. Maybe the modification of tags has not been done yet. 
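getTagIndexKey and tsdbGetTableTagVal above now locate a tag value through the offset already recorded in the tag schema column, rather than re-accumulating byte widths column by column. A compact sketch of that lookup, with simplified, assumed types (not the real STSchema/STColumn):

#include <stdint.h>
#include <stddef.h>

typedef struct DemoCol {
  int16_t colId;
  int8_t  type;
  int16_t offset;   /* byte offset of this column inside the encoded row */
} DemoCol;

/* Return a pointer to the tag value for colId, or NULL if no such column exists. */
static const char *demoGetTagVal(const char *row, size_t headSize,
                                 const DemoCol *cols, int nCols, int16_t colId) {
  for (int i = 0; i < nCols; ++i) {
    if (cols[i].colId == colId) {
      return row + headSize + cols[i].offset; /* offset comes from the schema */
    }
  }
  return NULL; /* no matched tag; mirrors the -1 return above */
}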
} - assert(pCol != NULL); - SDataRow row = (SDataRow)pTable->tagVal; - char* d = tdGetRowDataOfCol(row, pCol->type, TD_DATA_ROW_HEAD_SIZE + offset); + char* d = tdGetRowDataOfCol(row, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); *val = d; *type = pCol->type; @@ -263,15 +264,22 @@ int32_t tsdbGetTableTagVal(TsdbRepoT* repo, STableId* id, int32_t colId, int16_t return TSDB_CODE_SUCCESS; } -int32_t tsdbGetTableName(TsdbRepoT *repo, STableId* id, char** name) { +char* tsdbGetTableName(TsdbRepoT *repo, const STableId* id, int16_t* bytes) { STsdbMeta* pMeta = tsdbGetMeta(repo); STable* pTable = tsdbGetTableByUid(pMeta, id->uid); - *name = strndup(pTable->name, TSDB_TABLE_NAME_LEN); - if (*name == NULL) { - return -1; + if (pTable == NULL) { + if (bytes != NULL) { + *bytes = 0; + } + + return NULL; } else { - return 0; + if (bytes != NULL) { + *bytes = varDataLen(pTable->name); + } + + return (char*) pTable->name; } } @@ -296,7 +304,11 @@ int32_t tsdbCreateTableImpl(STsdbMeta *pMeta, STableCfg *pCfg) { super->schema = tdDupSchema(pCfg->schema); super->tagSchema = tdDupSchema(pCfg->tagSchema); super->tagVal = NULL; - super->name = strdup(pCfg->sname); + + // todo refactor extract method + size_t size = strnlen(pCfg->sname, TSDB_TABLE_NAME_LEN); + super->name = malloc(size + VARSTR_HEADER_SIZE); + STR_WITH_SIZE_TO_VARSTR(super->name, pCfg->sname, size); // index the first tag column STColumn* pColSchema = schemaColAt(super->tagSchema, 0); @@ -322,7 +334,11 @@ int32_t tsdbCreateTableImpl(STsdbMeta *pMeta, STableCfg *pCfg) { } table->tableId = pCfg->tableId; - table->name = strdup(pCfg->name); + + size_t size = strnlen(pCfg->name, TSDB_TABLE_NAME_LEN); + table->name = malloc(size + VARSTR_HEADER_SIZE); + STR_WITH_SIZE_TO_VARSTR(table->name, pCfg->name, size); + table->lastKey = 0; if (IS_CREATE_STABLE(pCfg)) { // TSDB_CHILD_TABLE table->type = TSDB_CHILD_TABLE; @@ -513,11 +529,14 @@ static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable) { // NOTE: do not allocate the space for key, since in each skip list node, only keep the pointer to pTable, not the // actual key value, and the key value will be retrieved during query through the pTable and getTagIndexKey function - SSkipListNode* pNode = calloc(1, headSize + POINTER_BYTES); + SSkipListNode* pNode = calloc(1, headSize + sizeof(STableIndexElem)); pNode->level = level; SSkipList* list = pSTable->pIndex; - memcpy(SL_GET_NODE_DATA(pNode), &pTable, POINTER_BYTES); + STableIndexElem* elem = (STableIndexElem*) (SL_GET_NODE_DATA(pNode)); + + elem->pTable = pTable; + elem->pMeta = pMeta; tSkipListPut(list, pNode); return 0; @@ -529,7 +548,10 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { STable* pSTable = tsdbGetTableByUid(pMeta, pTable->superUid); assert(pSTable != NULL); - char* key = dataRowTuple(pTable->tagVal); // key + STSchema* pSchema = tsdbGetTableTagSchema(pMeta, pTable); + STColumn* pCol = &pSchema->columns[DEFAULT_TAG_INDEX_COLUMN]; + + char* key = tdGetRowDataOfCol(pTable->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); bool ret = tSkipListRemove(pSTable->pIndex, key); assert(ret); @@ -539,7 +561,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) { static int tsdbEstimateTableEncodeSize(STable *pTable) { int size = 0; size += T_MEMBER_SIZE(STable, type); - size += sizeof(int) + strlen(pTable->name); + size += sizeof(int) + varDataLen(pTable->name); size += T_MEMBER_SIZE(STable, tableId); size += T_MEMBER_SIZE(STable, superUid); size += T_MEMBER_SIZE(STable, 
sversion); @@ -556,8 +578,7 @@ static int tsdbEstimateTableEncodeSize(STable *pTable) { return size; } -char *getTupleKey(const void * data) { +char *getTSTupleKey(const void * data) { SDataRow row = (SDataRow)data; - return POINTER_SHIFT(row, TD_DATA_ROW_HEAD_SIZE); } \ No newline at end of file diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 008ad223ed718de420bcb770b6dec4d736afb6e4..49b6d2984f00830c1b9330fe2daede4d9994adad 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -620,9 +620,13 @@ static void filterDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInf if (pCol->info.type != TSDB_DATA_TYPE_BINARY && pCol->info.type != TSDB_DATA_TYPE_NCHAR) { memmove(pCol->pData, src->pData + bytes * start, bytes * pQueryHandle->realNumOfRows); } else { // handle the var-string + char* dst = pCol->pData; + + // todo refactor, only copy one-by-one for(int32_t k = start; k < pQueryHandle->realNumOfRows + start; ++k) { char* p = tdGetColDataOfRow(src, k); - memcpy(pCol->pData + k * bytes, p, varDataTLen(p)); // todo refactor + memcpy(dst, p, varDataTLen(p)); + dst += bytes; } } @@ -1076,17 +1080,13 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY } assert(offset != -1); // todo handle error + void *value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset); if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) { - void *value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset); memcpy(pData, value, varDataTLen(value)); - - offset += sizeof(int32_t); } else { - memcpy(pData, dataRowTuple(row) + offset, pColInfo->info.bytes); - offset += pColInfo->info.bytes; + memcpy(pData, value, pColInfo->info.bytes); } - } numOfRows++; @@ -1225,8 +1225,8 @@ static int32_t getAllTableIdList(STable* pSuperTable, SArray* list) { while (tSkipListIterNext(iter)) { SSkipListNode* pNode = tSkipListIterGet(iter); - STable* t = *(STable**)SL_GET_NODE_DATA(pNode); - taosArrayPush(list, &t->tableId); + STableIndexElem* elem = (STableIndexElem*)(SL_GET_NODE_DATA((SSkipListNode*) pNode)); + taosArrayPush(list, &elem->pTable->tableId); } tSkipListDestroyIter(iter); @@ -1235,6 +1235,7 @@ static int32_t getAllTableIdList(STable* pSuperTable, SArray* list) { /** * convert the result pointer to table id instead of table object pointer + * todo remove it by using callback function to change the final result in-time. * @param pRes */ static void convertQueryResult(SArray* pRes, SArray* pTableList) { @@ -1244,8 +1245,8 @@ static void convertQueryResult(SArray* pRes, SArray* pTableList) { size_t size = taosArrayGetSize(pTableList); for (int32_t i = 0; i < size; ++i) { // todo speedup by using reserve space. 
- STable* pTable = taosArrayGetP(pTableList, i); - taosArrayPush(pRes, &pTable->tableId); + STableIndexElem* elem = taosArrayGet(pTableList, i); + taosArrayPush(pRes, &elem->pTable->tableId); } } @@ -1309,7 +1310,12 @@ void filterPrepare(void* expr, void* param) { pInfo->q = (char*) pCond->arr; } else { pInfo->q = calloc(1, pSchema->bytes); - tVariantDump(pCond, pInfo->q, pSchema->type); + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + tVariantDump(pCond, varDataVal(pInfo->q), pSchema->type); + varDataSetLen(pInfo->q, pCond->nLen); // the length may be changed after dump, so assign its value after dump + } else { + tVariantDump(pCond, pInfo->q, pSchema->type); + } } } @@ -1341,16 +1347,16 @@ int32_t tableGroupComparFn(const void *p1, const void *p2, const void *param) { int32_t bytes = 0; if (colIndex == TSDB_TBNAME_COLUMN_INDEX) { - f1 = pTable1->name; - f2 = pTable2->name; + f1 = (char*) pTable1->name; + f2 = (char*) pTable2->name; type = TSDB_DATA_TYPE_BINARY; - bytes = TSDB_TABLE_NAME_LEN; + bytes = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE; } else { - f1 = dataRowTuple(pTable1->tagVal); - f2 = dataRowTuple(pTable2->tagVal); - - type = schemaColAt(pTableGroupSupp->pTagSchema, colIndex)->type; - bytes = schemaColAt(pTableGroupSupp->pTagSchema, colIndex)->bytes; + STColumn* pCol = schemaColAt(pTableGroupSupp->pTagSchema, colIndex); + bytes = pCol->bytes; + + f1 = tdGetRowDataOfCol(pTable1->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); + f2 = tdGetRowDataOfCol(pTable2->tagVal, pCol->type, TD_DATA_ROW_HEAD_SIZE + pCol->offset); } int32_t ret = doCompare(f1, f2, type, bytes); @@ -1428,24 +1434,20 @@ SArray* createTableGroup(SArray* pTableList, STSchema* pTagSchema, SColIndex* pC bool tSkipListNodeFilterCallback(const void* pNode, void* param) { tQueryInfo* pInfo = (tQueryInfo*) param; - - STable* pTable = *(STable**)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); + + STableIndexElem* elem = (STableIndexElem*)(SL_GET_NODE_DATA((SSkipListNode*)pNode)); char* val = NULL; int8_t type = pInfo->sch.type; if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) { - val = pTable->name; + val = (char*) elem->pTable->name; type = TSDB_DATA_TYPE_BINARY; } else { STSchema* pTSchema = (STSchema*) pInfo->param; // todo table schema is identical to stable schema?? 
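Several hunks in this patch (filterPrepare, tableGroupComparFn, and the new comparators just below) move raw strings to the length-prefixed "var data" layout: a small length header followed by the payload. A self-contained sketch of that layout and a length-first comparison; the demo macros mirror varDataLen/varDataVal but are re-derived here as assumptions, not copies of the real headers:

#include <stdint.h>
#include <string.h>

#define DEMO_VARSTR_HEADER_SIZE ((int)sizeof(uint16_t))
#define demoVarDataLen(v)       (*(const uint16_t *)(v))
#define demoVarDataVal(v)       ((const char *)(v) + DEMO_VARSTR_HEADER_SIZE)
#define demoVarDataTLen(v)      (DEMO_VARSTR_HEADER_SIZE + demoVarDataLen(v))

/* Shorter strings sort first; equal lengths fall back to a byte comparison. */
static int demoCompareLenPrefixedStr(const void *pLeft, const void *pRight) {
  uint16_t len1 = demoVarDataLen(pLeft);
  uint16_t len2 = demoVarDataLen(pRight);
  if (len1 != len2) {
    return len1 > len2 ? 1 : -1;
  }
  int ret = memcmp(demoVarDataVal(pLeft), demoVarDataVal(pRight), len1);
  return (ret == 0) ? 0 : (ret > 0 ? 1 : -1);
}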
int32_t offset = pTSchema->columns[pInfo->colIndex].offset; - if (pInfo->sch.type == TSDB_DATA_TYPE_BINARY || pInfo->sch.type == TSDB_DATA_TYPE_NCHAR) { - val = tdGetRowDataOfCol(pTable->tagVal, pInfo->sch.type, TD_DATA_ROW_HEAD_SIZE + offset); - } else { - val = dataRowTuple(pTable->tagVal) + offset; - } + val = tdGetRowDataOfCol(elem->pTable->tagVal, pInfo->sch.type, TD_DATA_ROW_HEAD_SIZE + offset); } int32_t ret = 0; @@ -1456,8 +1458,6 @@ bool tSkipListNodeFilterCallback(const void* pNode, void* param) { ret = pInfo->compare(val, pInfo->q); } } else { -// tVariant t = {0}; -// tVariantCreateFromBinary(&t, val, (uint32_t)pInfo->sch.bytes, type); ret = pInfo->compare(val, pInfo->q); } @@ -1502,7 +1502,7 @@ static int32_t doQueryTableList(STable* pSTable, SArray* pRes, tExprNode* pExpr) .pExtInfo = pSTable->tagSchema, }; - SArray* pTableList = taosArrayInit(8, POINTER_BYTES); + SArray* pTableList = taosArrayInit(8, sizeof(STableIndexElem)); tExprTreeTraverse(pExpr, pSTable->pIndex, pTableList, &supp); tExprTreeDestroy(&pExpr, destroyHelper); diff --git a/src/util/src/talgo.c b/src/util/src/talgo.c index 32978453fc676cfb8d47fca10483471d275ad7e9..f343912cde267855bc083fbde47f380e9609742e 100644 --- a/src/util/src/talgo.c +++ b/src/util/src/talgo.c @@ -144,11 +144,11 @@ static void tqsortImpl(void *src, int32_t start, int32_t end, size_t size, const } if (leftPartEnd > start) { - tqsortImpl(src, size, start, leftPartEnd, param, comparFn, buf); + tqsortImpl(src, start, leftPartEnd, size, param, comparFn, buf); } if (rightPartStart < end) { - tqsortImpl(src, size, rightPartStart, end, param, comparFn, buf); + tqsortImpl(src, rightPartStart, end, size, param, comparFn, buf); } } diff --git a/src/util/src/tcompare.c b/src/util/src/tcompare.c index 2d6ae13f97b8394074eb265ac1a78a34fe5cf527..0abf1e6be39262ec2753b5f544300fe0f770ce88 100644 --- a/src/util/src/tcompare.c +++ b/src/util/src/tcompare.c @@ -60,33 +60,36 @@ int32_t compareDoubleVal(const void *pLeft, const void *pRight) { } } -int32_t compareStrVal(const void *pLeft, const void *pRight) { - return (int32_t)strcmp(pLeft, pRight); +int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) { + int32_t len1 = varDataLen(pLeft); + int32_t len2 = varDataLen(pRight); + + if (len1 != len2) { + return len1 > len2? 1:-1; + } else { + int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), len1); + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 1:-1; + } + } } -int32_t compareWStrVal(const void *pLeft, const void *pRight) { - // SSkipListKey *pL = (SSkipListKey *)pLeft; - // SSkipListKey *pR = (SSkipListKey *)pRight; - // - // if (pL->nLen == 0 && pR->nLen == 0) { - // return 0; - // } - // - // // handle only one-side bound compare situation, there is only lower bound or only upper bound - // if (pL->nLen == -1) { - // return 1; // no lower bound, lower bound is minimum, always return -1; - // } else if (pR->nLen == -1) { - // return -1; // no upper bound, upper bound is maximum situation, always return 1; - // } - // - // int32_t ret = wcscmp(((SSkipListKey *)pLeft)->wpz, ((SSkipListKey *)pRight)->wpz); - // - // if (ret == 0) { - // return 0; - // } else { - // return ret > 0 ? 1 : -1; - // } - return 0; +int32_t compareLenPrefixedWStr(const void *pLeft, const void *pRight) { + int32_t len1 = varDataLen(pLeft); + int32_t len2 = varDataLen(pRight); + + if (len1 != len2) { + return len1 > len2? 
1:-1; + } else { + int32_t ret = wcsncmp(varDataVal(pLeft), varDataVal(pRight), len1); + if (ret == 0) { + return 0; + } else { + return ret > 0 ? 1 : -1; + } + } } /* @@ -267,7 +270,7 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { } else if (optr == TSDB_RELATION_IN) { comparFn = compareFindStrInArray; } else { /* normal relational comparFn */ - comparFn = compareStrVal; + comparFn = compareLenPrefixedStr; } break; @@ -277,7 +280,7 @@ __compar_fn_t getComparFunc(int32_t type, int32_t optr) { if (optr == TSDB_RELATION_LIKE) { comparFn = compareWStrPatternComp; } else { - comparFn = compareWStrVal; + comparFn = compareLenPrefixedWStr; } break; @@ -296,6 +299,7 @@ __compar_fn_t getKeyComparFunc(int32_t keyType) { switch (keyType) { case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_BOOL: comparFn = compareInt8Val; break; case TSDB_DATA_TYPE_SMALLINT: @@ -308,21 +312,17 @@ __compar_fn_t getKeyComparFunc(int32_t keyType) { case TSDB_DATA_TYPE_TIMESTAMP: comparFn = compareInt64Val; break; - case TSDB_DATA_TYPE_BOOL: - comparFn = compareInt32Val; - break; - case TSDB_DATA_TYPE_FLOAT: case TSDB_DATA_TYPE_DOUBLE: comparFn = compareDoubleVal; break; case TSDB_DATA_TYPE_BINARY: - comparFn = compareStrVal; + comparFn = compareLenPrefixedStr; break; case TSDB_DATA_TYPE_NCHAR: - comparFn = compareWStrVal; + comparFn = compareLenPrefixedWStr; break; default: @@ -349,13 +349,20 @@ int32_t doCompare(const char* f1, const char* f2, int32_t type, size_t size) { } return (ret < 0) ? -1 : 1; } - default: { - int32_t ret = strncmp(f1, f2, (size_t)size); - if (ret == 0) { - return ret; - } + default: { // todo refactor + tstr* t1 = (tstr*) f1; + tstr* t2 = (tstr*) f2; - return (ret < 0) ? -1 : 1; + if (t1->len != t2->len) { + return t1->len > t2->len? 1:-1; + } else { + int32_t ret = strncmp(t1->data, t2->data, t1->len); + if (ret == 0) { + return 0; + } else { + return ret < 0? 
-1:1; + } + } } } } diff --git a/tests/pytest/import_merge/importDataH2.py b/tests/pytest/import_merge/importDataH2.py index 73a412fb8046ff1f6baf9c42d39221345c22fbee..b5e53d862e75935a66303f096374374a298b6a25 100644 --- a/tests/pytest/import_merge/importDataH2.py +++ b/tests/pytest/import_merge/importDataH2.py @@ -46,16 +46,16 @@ class TDTestCase: self.maxrows) tdLog.info("================= step2") - tdLog.info("import %d sequential data" % (self.maxrows / 2)) + tdLog.info("import %d sequential data" % (self.maxrows // 2)) startTime = self.startTime sqlcmd = ['import into tb1 values'] - for rid in range(1, self.maxrows / 2 + 1): + for rid in range(1, self.maxrows // 2 + 1): sqlcmd.append('(%ld, %d)' % (startTime + rid, rid)) tdSql.execute(" ".join(sqlcmd)) tdLog.info("================= step3") tdSql.query('select * from tb1') - tdSql.checkRows(self.maxrows / 2) + tdSql.checkRows(self.maxrows // 2) tdLog.info("================= step4") tdDnodes.stop(1) @@ -70,7 +70,7 @@ class TDTestCase: tdLog.info("================= step7") tdSql.execute('reset query cache') tdSql.query('select * from tb1 order by ts desc') - tdSql.checkRows(self.maxrows / 2 + 1) + tdSql.checkRows(self.maxrows // 2 + 1) tdLog.info("================= step8") tdLog.info("import 10 data in batch before") @@ -83,7 +83,7 @@ class TDTestCase: tdLog.info("================= step9") tdSql.execute('reset query cache') tdSql.query('select * from tb1 order by ts desc') - tdSql.checkRows(self.maxrows / 2 + 11) + tdSql.checkRows(self.maxrows // 2 + 11) def stop(self): tdSql.close() diff --git a/tests/pytest/import_merge/importDataSub.py b/tests/pytest/import_merge/importDataSub.py index 2359ca214fd7313b93802c3c34a0406f98745647..e946a254c205b94c6a89702d7a037960c124ab99 100644 --- a/tests/pytest/import_merge/importDataSub.py +++ b/tests/pytest/import_merge/importDataSub.py @@ -46,16 +46,17 @@ class TDTestCase: self.maxrows) tdLog.info("================= step2") - tdLog.info("import %d sequential data" % (self.maxrows / 2)) + tdLog.info("import %d sequential data" % (self.maxrows // 2)) startTime = self.startTime sqlcmd = ['import into tb1 values'] - for rid in range(1, self.maxrows / 2 + 1): + for rid in range(1, self.maxrows // 2 + 1): sqlcmd.append('(%ld, %d)' % (startTime + rid, rid)) + tdSql.execute(" ".join(sqlcmd)) tdLog.info("================= step3") tdSql.query('select * from tb1') - tdSql.checkRows(self.maxrows / 2) + tdSql.checkRows(self.maxrows // 2) tdLog.info("================= step4") tdDnodes.stop(1) @@ -73,7 +74,7 @@ class TDTestCase: tdLog.info("================= step9") tdSql.execute('reset query cache') tdSql.query('select * from tb1 order by ts desc') - tdSql.checkRows(self.maxrows / 2) + tdSql.checkRows(self.maxrows // 2) def stop(self): tdSql.close() diff --git a/tests/pytest/import_merge/importToCommit.py b/tests/pytest/import_merge/importToCommit.py index 7a408bcdce52f8c699e751057554eb26fe70c4dc..3684dde049ff0185c3bde7ff50886c3e24ce55fe 100644 --- a/tests/pytest/import_merge/importToCommit.py +++ b/tests/pytest/import_merge/importToCommit.py @@ -33,7 +33,7 @@ class TDTestCase: tdDnodes.start(1) tdSql.execute('reset query cache') tdSql.execute('drop database if exists db') - tdSql.execute('create database db cache 512 tables 10') + tdSql.execute('create database db cache 512 maxtables 10') tdSql.execute('use db') tdLog.info("================= step1") diff --git a/tests/pytest/smoketest.sh b/tests/pytest/smoketest.sh index e51e8792ec55f31bd006701cb84b71fc822547e5..853ebe1d76f4c3dfcc764dfff83397f247748929 100755 --- 
a/tests/pytest/smoketest.sh +++ b/tests/pytest/smoketest.sh @@ -29,88 +29,17 @@ python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f table/db_table.py python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importDataLastTO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importDataLastT.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importDataTO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importDataT.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importHeadOverlap.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importHeadPartOverlap.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importHORestart.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importHPORestart.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importHRestart.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importLastSub.py -python3 ./test.py $1 -s && sleep 1 - -python3 ./test.py $1 -f import_merge/importBlock1HO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1HPO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1H.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1S.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1Sub.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1TO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1TPO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock1T.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2HO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2HPO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2H.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2S.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2Sub.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2TO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2TPO.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlock2T.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importBlockbetween.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importCacheFileSub.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importCacheFileTO.py -python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importCacheFileT.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importDataLastSub.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importHead.py python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importLastTO.py -python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importLastT.py python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importSpan.py python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importSRestart.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f 
import_merge/importSubRestart.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importTailOverlap.py -python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importTail.py python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importTORestart.py -python3 ./test.py $1 -s && sleep 1 -python3 ./test.py $1 -f import_merge/importTPORestart.py -python3 ./test.py $1 -s && sleep 1 python3 ./test.py $1 -f import_merge/importTRestart.py python3 ./test.py $1 -s && sleep 1 diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index e492a35c2061f4638ffdee83aea925de135e84de..23adab2c474962af95107006c8bb2c342cc688b1 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -201,8 +201,8 @@ class TDDnode: self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) - tdLog.debug("wait 2 seconds for the dnode:%d to start." % (self.index)) - time.sleep(2) + tdLog.debug("wait 4 seconds for the dnode:%d to start." % (self.index)) + time.sleep(4) def stop(self): if self.valgrind == 0: diff --git a/tests/pytest/valgrind-test.sh b/tests/pytest/valgrind-test.sh new file mode 100755 index 0000000000000000000000000000000000000000..853ebe1d76f4c3dfcc764dfff83397f247748929 --- /dev/null +++ b/tests/pytest/valgrind-test.sh @@ -0,0 +1,45 @@ +#!/bin/bash +python3 ./test.py $1 -f insert/basic.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/int.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/float.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/bigint.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/bool.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/double.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/smallint.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/tinyint.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/binary.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/date.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f insert/nchar.py +python3 ./test.py $1 -s && sleep 1 + +python3 ./test.py $1 -f table/column_name.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f table/column_num.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f table/db_table.py +python3 ./test.py $1 -s && sleep 1 + +python3 ./test.py $1 -f import_merge/importCacheFileT.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f import_merge/importDataLastSub.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f import_merge/importHead.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f import_merge/importLastT.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f import_merge/importSpan.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f import_merge/importTail.py +python3 ./test.py $1 -s && sleep 1 +python3 ./test.py $1 -f import_merge/importTRestart.py +python3 ./test.py $1 -s && sleep 1 diff --git a/tests/script/basicSuite.sim b/tests/script/basicSuite.sim index aa0fbf65b7114b4a18adfccd7f2137a9ecb3fa63..a99edfd93d1d06701542877b9831603e5204110e 100644 --- a/tests/script/basicSuite.sim +++ b/tests/script/basicSuite.sim @@ -1,26 +1,20 @@ -################################# run general/cache/new_metrics.sim run general/compress/compress.sim run general/compute/avg.sim -run general/compute/bottom.sim run general/compute/count.sim run 
general/db/len.sim run general/db/basic4.sim run general/http/restful_insert.sim run general/import/basic.sim run general/import/commit.sim -run general/insert/basic.sim run general/insert/query_file_memory.sim run general/parser/binary_escapeCharacter.sim run general/parser/columnValue_bigint.sim run general/parser/select_from_cache_disk.sim run general/table/autocreate.sim -run general/table/basic3.sim run general/table/column_name.sim run general/table/int.sim run general/table/vgroup.sim run general/user/basic1.sim run general/user/pass_alter.sim -run general/user/user_len.sim run general/vector/single.sim -################################## diff --git a/tests/script/general/cache/restart_metrics.sim b/tests/script/general/cache/restart_metrics.sim index dbd15b945f6cb855a67f2531547af8c95b874897..c85a66869ac6cc24fc14645ccf1aa4fc7a1f4e8f 100644 --- a/tests/script/general/cache/restart_metrics.sim +++ b/tests/script/general/cache/restart_metrics.sim @@ -27,11 +27,15 @@ sql create table $tb using $mt tags( "1" ) sql insert into $tb values (now, 1) sql select * from $tb -print ===>rows $rows, data $data01 +#print ===>rows $rows, data $data01 + if $rows != 1 then + print expect 1, actual: $rows return -1 -endi +endi + if $data01 != 1 then + print expect 1 actual: $data01 return -1 endi @@ -66,8 +70,10 @@ sql select * from $tb print ===>rows $rows, data $data01 if $rows != 1 then return -1 -endi +endi + if $data01 != 1 then + print expect 1, actual $data01 return -1 endi @@ -80,6 +86,7 @@ if $data01 != 1 then return -1 endi if $data02 != 3 then + print expect 3 actual: $data02 return -1 endi diff --git a/tests/script/general/db/delete_reuse2.sim b/tests/script/general/db/delete_reuse2.sim index 0117d87104fc98ed01422c6534afdb57566b6284..d8b27630427ac434dfce6649648c78edf09ac27a 100644 --- a/tests/script/general/db/delete_reuse2.sim +++ b/tests/script/general/db/delete_reuse2.sim @@ -26,6 +26,7 @@ system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4 print ========= start dnodes +sleep 2000 system sh/exec.sh -n dnode1 -s start sleep 3000 sql connect diff --git a/tests/script/general/db/testSuite.sim b/tests/script/general/db/testSuite.sim index 2cac8b8fa882e10befcb6d6602665373cef1e8f6..87863001a3dfab6bb16a9919910b5e7bedd7863d 100644 --- a/tests/script/general/db/testSuite.sim +++ b/tests/script/general/db/testSuite.sim @@ -1,4 +1,4 @@ -run general//db/basic.sim +run general/db/basic.sim run general/db/basic1.sim run general/db/basic2.sim run general/db/basic3.sim @@ -13,3 +13,4 @@ run general/db/delete_writing2.sim run general/db/len.sim run general/db/repeat.sim run general/db/tables.sim +run general/db/vnodes.sim diff --git a/tests/script/general/db/vnodes.sim b/tests/script/general/db/vnodes.sim new file mode 100644 index 0000000000000000000000000000000000000000..684910884b8f5cc3a440a6d60272ddaff048352b --- /dev/null +++ b/tests/script/general/db/vnodes.sim @@ -0,0 +1,45 @@ +system sh/stop_dnodes.sh + +$totalVnodes = 10 +$maxTables = 4 +$totalRows = $totalVnodes * $maxTables + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes +system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000 +system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000 +system sh/cfg.sh -n dnode1 -c maxShellConns -v 100000 +system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 100000 + +print ========== prepare 
data +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect +sql create database db blocks 2 cache 1 maxTables $maxTables +sql use db + +print ========== step1 +sql create table mt (ts timestamp, tbcol int) TAGS(tgcol int) + +$x = 0 +while $x < $totalRows + $tb = t . $x + sql create table $tb using mt tags( $x ) + sql insert into $tb values (now, $x ) + $x = $x + 1 +endw + +print ========== step2 +sql select * from mt +if $rows != $totalRows then + return -1 +endi + +sql select count(*) from mt +if $data00 != $totalRows then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/field/bigint.sim b/tests/script/general/field/bigint.sim index 5cca73595c9159c8da8cea31e24471814f8f9771..10060f7422f50ccbb36b118d9dbfbc7729059e3f 100644 --- a/tests/script/general/field/bigint.sim +++ b/tests/script/general/field/bigint.sim @@ -132,7 +132,9 @@ endi print =============== step6 sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from $mt where tbcol = 1 group by tgcol print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +print $data10 $data11 $data12 $data13 $data14 $data15 $data16 +if $data00 != 100 then + print expect 100, actual $data00 return -1 endi diff --git a/tests/script/general/parser/auto_create_tb.sim b/tests/script/general/parser/auto_create_tb.sim index 8fb0ddae545855fd44df5ef1c602521a3e5c112a..54e028a2005c34964e10ffe4ee5f55d0fc24d85e 100644 --- a/tests/script/general/parser/auto_create_tb.sim +++ b/tests/script/general/parser/auto_create_tb.sim @@ -96,25 +96,25 @@ $ts1 = $ts0 + 1000 $ts2 = $ts0 + 2000 sql insert into tb_1 using $stb tags (-1) values ( $ts1 , 1,1,1,1,'bin',1,1,1,'涛思数据') ( $ts2 , 2,2,2,2,'binar', 1,1,1,'nchar') sql select * from $stb -if $rows != 3 then +if $rows != 5 then return -1 endi -if $data19 != 涛思数据 then +if $data09 != 涛思数据 then return -1 endi -if $data11 != 1 then +if $data01 != 1 then return -1 endi -if $data22 != 2 then +if $data42 != 2 then return -2 endi -if $data23 != 2.00000 then +if $data43 != 2.00000 then return -1 endi -if $data25 != binar then +if $data45 != binar then return -1 endi -if $data29 != nchar then +if $data49 != nchar then return -1 endi sql drop table tb_1 @@ -127,22 +127,22 @@ sql select * from $stb if $rows != 5 then return -1 endi -if $data19 != 涛思数据 then +if $data09 != 涛思数据 then return -1 endi -if $data11 != 1 then +if $data01 != 1 then return -1 endi -if $data22 != 2 then +if $data42 != 2 then return -2 endi -if $data23 != 2.00000 then +if $data43 != 2.00000 then return -1 endi -if $data25 != binar then +if $data45 != binar then return -1 endi -if $data29 != nchar then +if $data49 != nchar then return -1 endi @@ -154,13 +154,13 @@ sql show tables if $rows != 3 then return -1 endi -if $data00 != tb3 then +if $data00 != tb1 then return -1 endi if $data10 != tb2 then return -1 endi -if $data20 != tb1 then +if $data20 != tb3 then return -1 endi diff --git a/tests/script/general/parser/lastrow_query.sim b/tests/script/general/parser/lastrow_query.sim index a5e003fb651e69024326c58e5d8716d6509f28a2..e43cc1517398dd067bad7531d0960b3a459cde27 100644 --- a/tests/script/general/parser/lastrow_query.sim +++ b/tests/script/general/parser/lastrow_query.sim @@ -43,9 +43,11 @@ if $data07 != 1 then return -1 endi if $data08 != BINARY then + print expect BINARY actual: $data08 return -1 endi if $data09 != NCHAR then + print expect NCHAR actual: $data09 return -1 endi diff --git 
a/tests/script/general/parser/nchar.sim b/tests/script/general/parser/nchar.sim index fbdc12446ac050cf36e751a971a00e20e73be92b..2a6bc83b41e5bce4975a29cea5e2a5a32a393a44 100644 --- a/tests/script/general/parser/nchar.sim +++ b/tests/script/general/parser/nchar.sim @@ -194,8 +194,10 @@ sql reset query cache sql select * from $mt where tgcol = '1' step2: if $rows != 100 then + print expect 100, actual: $rows return -1 endi + sql select * from $mt where tgcol > '0' #print rows = $rows if $rows != 100 then diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index aaf8979337f8914d0fc2aa50aceda2cff6c8bd71..a2a08097dd217c8509794e486aef3efedb0d1686 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -1,113 +1,98 @@ -# run general/parser/alter.sim -# sleep 2000 -# run general/parser/alter1.sim -# sleep 2000 -# run general/parser/alter_stable.sim -# sleep 2000 -# run general/parser/auto_create_tb.sim -# sleep 2000 -# run general/parser/auto_create_tb_drop_tb.sim -# sleep 2000 +#run general/parser/alter.sim +#sleep 2000 +#run general/parser/alter1.sim +#sleep 2000 +#run general/parser/alter_stable.sim +#sleep 2000 +#run general/parser/auto_create_tb.sim +#sleep 2000 +#run general/parser/auto_create_tb_drop_tb.sim -run general/parser/binary_escapeCharacter.sim -sleep 2000 -run general/parser/bug.sim sleep 2000 run general/parser/col_arithmetic_operation.sim sleep 2000 -run general/parser/columnValue_bigint.sim +run general/parser/columnValue.sim sleep 2000 -run general/parser/columnValue_bool.sim +run general/parser/commit.sim sleep 2000 -run general/parser/columnValue_double.sim +run general/parser/create_db.sim sleep 2000 -run general/parser/columnValue_float.sim +run general/parser/create_mt.sim sleep 2000 -run general/parser/columnValue_int.sim +run general/parser/create_tb.sim +sleep 2000 +run general/parser/dbtbnameValidate.sim +sleep 2000 +run general/parser/import_commit1.sim +sleep 2000 +run general/parser/import_commit2.sim +sleep 2000 +run general/parser/import_commit3.sim +sleep 2000 +run general/parser/insert_tb.sim +sleep 2000 +run general/parser/first_last.sim +sleep 2000 +run general/parser/import_file.sim +sleep 2000 +run general/parser/lastrow.sim +sleep 2000 +run general/parser/nchar.sim +sleep 2000 +run general/parser/null_char.sim +sleep 2000 +run general/parser/fill.sim +sleep 2000 +run general/parser/fill_stb.sim +sleep 2000 +run general/parser/tags_dynamically_specifiy.sim +sleep 2000 +run general/parser/interp.sim +sleep 2000 +run general/parser/limit.sim +sleep 2000 +run general/parser/limit1.sim +sleep 2000 +run general/parser/limit1_tblocks100.sim +sleep 2000 +run general/parser/limit2.sim +sleep 2000 +run general/parser/mixed_blocks.sim +sleep 2000 +run general/parser/selectResNum.sim +sleep 2000 +run general/parser/select_across_vnodes.sim sleep 2000 - -# sleep 2000 -# run general/parser/col_arithmetic_operation.sim -# sleep 2000 -# run general/parser/columnValue.sim -# sleep 2000 -# run general/parser/commit.sim -# run general/parser/create_db.sim -# sleep 2000 -# run general/parser/create_mt.sim -# sleep 2000 -# run general/parser/create_tb.sim -# sleep 2000 -# run general/parser/dbtbnameValidate.sim -# sleep 2000 -# run general/parser/fill.sim -# sleep 2000 -# run general/parser/fill_stb.sim -# sleep 2000 -# run general/parser/first_last.sim -# sleep 2000 -# run general/parser/import_commit1.sim -# sleep 2000 -# run general/parser/import_commit2.sim -# sleep 2000 -# run 
general/parser/import_commit3.sim -# sleep 2000 -# run general/parser/import_file.sim -# sleep 2000 -# run general/parser/insert_tb.sim -# sleep 2000 -# run general/parser/tags_dynamically_specifiy.sim -# sleep 2000 -# run general/parser/interp.sim -# run general/parser/lastrow.sim -# sleep 2000 -# run general/parser/limit.sim -# sleep 2000 -# run general/parser/limit1.sim -# sleep 2000 -# run general/parser/limit1_tblocks100.sim -# sleep 2000 -# run general/parser/limit2.sim -# sleep 2000 -# run general/parser/mixed_blocks.sim -# sleep 2000 -# run general/parser/nchar.sim -# sleep 2000 -# run general/parser/null_char.sim -# sleep 2000 -# run general/parser/selectResNum.sim -# sleep 2000 -# run general/parser/select_across_vnodes.sim -# sleep 2000 run general/parser/select_from_cache_disk.sim sleep 2000 -# run general/parser/set_tag_vals.sim -# sleep 2000 -# run general/parser/single_row_in_tb.sim -# sleep 2000 -# run general/parser/slimit.sim -# sleep 2000 +run general/parser/set_tag_vals.sim +sleep 2000 +run general/parser/single_row_in_tb.sim +sleep 2000 +run general/parser/slimit.sim +sleep 2000 run general/parser/slimit1.sim sleep 2000 -run general/parser/slimit1_query.sim -sleep 2000 -# run general/parser/slimit_alter_tags.sim -# sleep 2000 -# run general/parser/stream_on_sys.sim -# sleep 2000 -# run general/parser/stream.sim -# sleep 2000 -# run general/parser/tbnameIn.sim -# sleep 2000 -# run general/parser/where.sim -# sleep 2000 -# #run general/parser/repeatAlter.sim -# sleep 2000 -# #run general/parser/repeatStream.sim -# sleep 2000 -# run general/parser/join.sim -# run general/parser/join_multivnode.sim -# run general/parser/projection_limit_offset.sim -# sleep 2000 -# run general/parser/select_with_tags.sim -# run general/parser/groupby.sim +run general/parser/slimit_alter_tags.sim +sleep 2000 +run general/parser/stream_on_sys.sim +sleep 2000 +run general/parser/stream.sim +sleep 2000 +run general/parser/tbnameIn.sim +sleep 2000 +run general/parser/where.sim +sleep 2000 +#run general/parser/repeatAlter.sim +sleep 2000 +#run general/parser/repeatStream.sim +sleep 2000 +run general/parser/join.sim +sleep 2000 +run general/parser/join_multivnode.sim +sleep 2000 +run general/parser/projection_limit_offset.sim +sleep 2000 +run general/parser/select_with_tags.sim +sleep 2000 +run general/parser/groupby.sim diff --git a/tests/script/general/table/fill.sim b/tests/script/general/table/fill.sim index 00048eb0250242a6975ec8e7b0e15e86a6368390..333573e577b636ef780e652d1dc61d4d6701f925 100644 --- a/tests/script/general/table/fill.sim +++ b/tests/script/general/table/fill.sim @@ -42,7 +42,7 @@ sql select count(*), last(ts), min(k), max(k), avg(k) from db.mt where a=0 and t print =================== step2 system sh/exec.sh -n dnode1 -s stop -x SIGINT -sleep 10000 +sleep 5000 system sh/exec.sh -n dnode1 -s start sleep 3000 diff --git a/tests/script/general/tag/bool.sim b/tests/script/general/tag/bool.sim index 540eaf123872d3cbdba15fd3556866802992be9a..349cb738bffbb216305af0718611987ae7f20f9c 100644 --- a/tests/script/general/tag/bool.sim +++ b/tests/script/general/tag/bool.sim @@ -122,7 +122,8 @@ if $rows != 100 then return -1 endi sql select * from $mt where tgcol = 1 -if $rows != 100 then +if $rows != 100 then + print expect 100, actual:$rows return -1 endi sql select * from $mt where tgcol <> 1 diff --git a/tests/script/general/tag/filter.sim b/tests/script/general/tag/filter.sim index 4388f029f9c84b707ae564962e398c098b4b5529..75a6ed00da6d3b8173b748961d42ab8f909e7ef1 100644 --- 
a/tests/script/general/tag/filter.sim +++ b/tests/script/general/tag/filter.sim @@ -122,7 +122,8 @@ endi print =============== step14 sql select count(tbcol) as c from $mt where ts > 1000 group by tgcol print $data00 $data01 $data02 $data03 $data04 $data05 $data06 -if $data00 != 100 then +if $data00 != 100 then + print expect 100, actual $data00 return -1 endi diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index d96a1db2b40c37d87366dff31448064b26169b07..62bae1c2a5f2c78537ec7597cdae9e543ac95591 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -1,9 +1,6 @@ cd ../../debug; cmake .. -#cd ../../debug; make clean cd ../../debug; make - cd ../../../debug; cmake .. -#cd ../../../debug; make clean cd ../../../debug; make #./test.sh -f general/alter/cached_schema_after_alter.sim @@ -15,12 +12,12 @@ cd ../../../debug; make #./test.sh -f general/alter/table.sim ./test.sh -f general/cache/new_metrics.sim -#./test.sh -f general/cache/restart_metrics.sim +./test.sh -f general/cache/restart_metrics.sim ./test.sh -f general/cache/restart_table.sim -#./test.sh -f general/column/commit.sim -#./test.sh -f general/column/metrics.sim -#./test.sh -f general/column/table.sim +#hongze ./test.sh -f general/column/commit.sim +#hongze ./test.sh -f general/column/metrics.sim +#hongze ./test.sh -f general/column/table.sim ./test.sh -f general/compress/commitlog.sim ./test.sh -f general/compress/compress.sim @@ -31,11 +28,11 @@ cd ../../../debug; make ./test.sh -f general/compute/bottom.sim ./test.sh -f general/compute/count.sim ./test.sh -f general/compute/diff.sim -#./test.sh -f general/compute/diff2.sim +# liao./test.sh -f general/compute/diff2.sim ./test.sh -f general/compute/first.sim -#./test.sh -f general/compute/interval.sim -#./test.sh -f general/compute/last.sim -#./test.sh -f general/compute/leastsquare.sim +# liao./test.sh -f general/compute/interval.sim +# liao./test.sh -f general/compute/last.sim +# liao./test.sh -f general/compute/leastsquare.sim ./test.sh -f general/compute/max.sim ./test.sh -f general/compute/min.sim ./test.sh -f general/compute/null.sim @@ -53,41 +50,42 @@ cd ../../../debug; make ./test.sh -f general/db/delete_reuse1.sim ./test.sh -f general/db/delete_reuse2.sim ./test.sh -f general/db/delete_reusevnode.sim -#./test.sh -f general/db/delete_reusevnode2.sim +./test.sh -f general/db/delete_reusevnode2.sim ./test.sh -f general/db/delete_writing1.sim ./test.sh -f general/db/delete_writing2.sim ./test.sh -f general/db/len.sim +#./test.sh -u -f general/db/vnodes.sim ./test.sh -f general/db/repeat.sim ./test.sh -f general/db/tables.sim -#./test.sh -f general/field/2.sim +./test.sh -f general/field/2.sim #./test.sh -f general/field/3.sim #./test.sh -f general/field/4.sim #./test.sh -f general/field/5.sim #./test.sh -f general/field/6.sim -##./test.sh -f general/field/bigint.sim -#./test.sh -f general/field/binary.sim -##./test.sh -f general/field/bool.sim -#./test.sh -f general/field/single.sim -##./test.sh -f general/field/smallint.sim -##./test.sh -f general/field/tinyint.sim - -##./test.sh -f general/http/restful.sim +./test.sh -f general/field/bigint.sim +# liao./test.sh -f general/field/binary.sim +./test.sh -f general/field/bool.sim +./test.sh -f general/field/single.sim +./test.sh -f general/field/smallint.sim +./test.sh -f general/field/tinyint.sim + +# jeff ./test.sh -f general/http/restful.sim ./test.sh -f general/http/restful_insert.sim ./test.sh -f general/http/restful_limit.sim -##./test.sh -f 
general/http/restful_full.sim +# jeff ./test.sh -f general/http/restful_full.sim ./test.sh -f general/http/prepare.sim ./test.sh -f general/http/telegraf.sim ./test.sh -f general/http/grafana_bug.sim -#./test.sh -f general/http/grafana.sim +# jeff ./test.sh -f general/http/grafana.sim ./test.sh -f general/import/basic.sim ./test.sh -f general/import/commit.sim ./test.sh -f general/import/large.sim -#./test.sh -f general/import/replica1.sim +#hongze ./test.sh -f general/import/replica1.sim ./test.sh -f general/insert/basic.sim -#./test.sh -f general/insert/insert_drop.sim +#hongze ./test.sh -f general/insert/insert_drop.sim ./test.sh -f general/insert/query_block1_memory.sim ./test.sh -f general/insert/query_block2_memory.sim ./test.sh -f general/insert/query_block1_file.sim @@ -96,8 +94,6 @@ cd ../../../debug; make ./test.sh -f general/insert/query_multi_file.sim ./test.sh -f general/insert/tcp.sim -#parser - # ./test.sh -f general/parser/alter.sim # ./test.sh -f general/parser/alter1.sim # ./test.sh -f general/parser/alter_stable.sim @@ -112,8 +108,8 @@ cd ../../../debug; make ./test.sh -f general/parser/columnValue_float.sim ./test.sh -f general/parser/columnValue_int.sim # ./test.sh -f general/parser/col_arithmetic_operation.sim -# ./test.sh -f general/parser/columnValue.sim -# ./test.sh -f general/parser/commit.sim +./test.sh -f general/parser/columnValue.sim +./test.sh -f general/parser/commit.sim # ./test.sh -f general/parser/create_db.sim # ./test.sh -f general/parser/create_mt.sim # ./test.sh -f general/parser/create_tb.sim @@ -121,9 +117,9 @@ cd ../../../debug; make # ./test.sh -f general/parser/fill.sim # ./test.sh -f general/parser/fill_stb.sim # ./test.sh -f general/parser/first_last.sim -# ./test.sh -f general/parser/import_commit1.sim -# ./test.sh -f general/parser/import_commit2.sim -# ./test.sh -f general/parser/import_commit3.sim +./test.sh -f general/parser/import_commit1.sim +./test.sh -f general/parser/import_commit2.sim +./test.sh -f general/parser/import_commit3.sim # ./test.sh -f general/parser/import_file.sim # ./test.sh -f general/parser/insert_tb.sim # ./test.sh -f general/parser/tags_dynamically_specifiy.sim @@ -160,14 +156,12 @@ cd ../../../debug; make #./test.sh -f general/stable/disk.sim #./test.sh -f general/stable/metrics.sim #./test.sh -f general/stable/values.sim -#./test.sh -f general/stable/vnode3.sim - -#stream +./test.sh -f general/stable/vnode3.sim ./test.sh -f general/table/autocreate.sim ./test.sh -f general/table/basic1.sim ./test.sh -f general/table/basic2.sim -./test.sh -f general/table/basic3.sim +#hongze ./test.sh -f general/table/basic3.sim ./test.sh -f general/table/bigint.sim ./test.sh -f general/table/binary.sim ./test.sh -f general/table/bool.sim @@ -177,17 +171,17 @@ cd ../../../debug; make ./test.sh -f general/table/column2.sim ./test.sh -f general/table/date.sim ./test.sh -f general/table/db.table.sim -#./test.sh -f general/table/delete_reuse1.sim -#./test.sh -f general/table/delete_reuse2.sim -#./test.sh -f general/table/delete_writing.sim -#./test.sh -f general/table/describe.sim +./test.sh -f general/table/delete_reuse1.sim +./test.sh -f general/table/delete_reuse2.sim +#hongze ./test.sh -f general/table/delete_writing.sim +./test.sh -f general/table/describe.sim ./test.sh -f general/table/double.sim -#./test.sh -f general/table/fill.sim +./test.sh -f general/table/fill.sim ./test.sh -f general/table/float.sim ./test.sh -f general/table/int.sim ./test.sh -f general/table/limit.sim ./test.sh -f general/table/smallint.sim -#./test.sh 
-f general/table/table_len.sim +./test.sh -f general/table/table_len.sim ./test.sh -f general/table/table.sim ./test.sh -f general/table/tinyint.sim ./test.sh -f general/table/vgroup.sim @@ -197,47 +191,45 @@ cd ../../../debug; make #./test.sh -f general/tag/5.sim #./test.sh -f general/tag/6.sim #./test.sh -f general/tag/add.sim -#./test.sh -f general/tag/bigint.sim +./test.sh -f general/tag/bigint.sim #./test.sh -f general/tag/binary_binary.sim #./test.sh -f general/tag/binary.sim #./test.sh -f general/tag/bool_binary.sim #./test.sh -f general/tag/bool_int.sim -#./test.sh -f general/tag/bool.sim +./test.sh -f general/tag/bool.sim #./test.sh -f general/tag/change.sim -#./test.sh -f general/tag/column.sim +#liao ./test.sh -f general/tag/column.sim #./test.sh -f general/tag/commit.sim -#./test.sh -f general/tag/create.sim +#liao ./test.sh -f general/tag/create.sim #./test.sh -f general/tag/delete.sim #./test.sh -f general/tag/double.sim -#./test.sh -f general/tag/filter.sim +./test.sh -f general/tag/filter.sim #./test.sh -f general/tag/float.sim #./test.sh -f general/tag/int_binary.sim #./test.sh -f general/tag/int_float.sim -#./test.sh -f general/tag/int.sim +./test.sh -f general/tag/int.sim #./test.sh -f general/tag/set.sim -#./test.sh -f general/tag/smallint.sim -#./test.sh -f general/tag/tinyint.sim +./test.sh -f general/tag/smallint.sim +./test.sh -f general/tag/tinyint.sim ./test.sh -f general/user/basic1.sim -#./test.sh -f general/user/monitor.sim +./test.sh -f general/user/monitor.sim ./test.sh -f general/user/pass_alter.sim ./test.sh -f general/user/pass_len.sim ./test.sh -f general/user/user_create.sim ./test.sh -f general/user/user_len.sim -#./test.sh -f general/vector/metrics_field.sim -#./test.sh -f general/vector/metrics_mix.sim -#./test.sh -f general/vector/metrics_query.sim -#./test.sh -f general/vector/metrics_tag.sim -#./test.sh -f general/vector/metrics_time.sim -#./test.sh -f general/vector/multi.sim +./test.sh -f general/vector/metrics_field.sim +./test.sh -f general/vector/metrics_mix.sim +./test.sh -f general/vector/metrics_query.sim +./test.sh -f general/vector/metrics_tag.sim +./test.sh -f general/vector/metrics_time.sim +#liao ./test.sh -f general/vector/multi.sim ./test.sh -f general/vector/single.sim -#./test.sh -f general/vector/table_field.sim -#./test.sh -f general/vector/table_mix.sim -#./test.sh -f general/vector/table_query.sim -#./test.sh -f general/vector/table_time.sim - -################################# +./test.sh -f general/vector/table_field.sim +./test.sh -f general/vector/table_mix.sim +./test.sh -f general/vector/table_query.sim +./test.sh -f general/vector/table_time.sim ./test.sh -u -f unique/account/account_create.sim ./test.sh -u -f unique/account/account_delete.sim @@ -252,8 +244,8 @@ cd ../../../debug; make ./test.sh -u -f unique/account/user_len.sim #./test.sh -u -f unique/big/balance.sim -#./test.sh -u -f unique/big/maxvnodes.sim -#./test.sh -u -f unique/big/tcp.sim +#slguan ./test.sh -u -f unique/big/maxvnodes.sim +./test.sh -u -f unique/big/tcp.sim ##./test.sh -u -f unique/cluster/balance1.sim ##./test.sh -u -f unique/cluster/balance2.sim @@ -298,27 +290,22 @@ cd ../../../debug; make #./test.sh -u -f unique/metrics/replica3_dnode6.sim #./test.sh -u -f unique/metrics/replica3_vnode3.sim -##./test.sh -u -f unique/mnode/mgmt22.sim -##./test.sh -u -f unique/mnode/mgmt23.sim -##./test.sh -u -f unique/mnode/mgmt24.sim -##./test.sh -u -f unique/mnode/mgmt25.sim -##./test.sh -u -f unique/mnode/mgmt26.sim -##./test.sh -u -f unique/mnode/mgmt33.sim 
-##./test.sh -u -f unique/mnode/mgmt34.sim +./test.sh -u -f unique/mnode/mgmt22.sim +./test.sh -u -f unique/mnode/mgmt23.sim +./test.sh -u -f unique/mnode/mgmt24.sim +./test.sh -u -f unique/mnode/mgmt25.sim +./test.sh -u -f unique/mnode/mgmt26.sim +./test.sh -u -f unique/mnode/mgmt33.sim +./test.sh -u -f unique/mnode/mgmt34.sim #./test.sh -u -f unique/mnode/mgmtr2.sim #./test.sh -u -f unique/mnode/secondIp.sim -#stream - ##./test.sh -u -f unique/table/delete_part.sim -##./test.sh -u -f unique/vnode/replica2_basic2.sim -##./test.sh -u -f unique/vnode/replica3_basic.sim - #./test.sh -u -f unique/vnode/commit.sim #./test.sh -u -f unique/vnode/many.sim #./test.sh -u -f unique/vnode/replica2_basic.sim -##./test.sh -u -f unique/vnode/replica2_basic2.sim +./test.sh -u -f unique/vnode/replica2_basic2.sim #./test.sh -u -f unique/vnode/replica2_repeat.sim ##./test.sh -u -f unique/vnode/replica3_basic.sim #./test.sh -u -f unique/vnode/replica3_repeat.sim diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh index 5587e3bb8c16d38e952630c894d99996e20cefaa..bc18479896993fa4a74febfdcd36387a011d95ba 100755 --- a/tests/script/sh/deploy.sh +++ b/tests/script/sh/deploy.sh @@ -99,7 +99,7 @@ echo "logDir $LOG_DIR" >> $TAOS_CFG echo "dDebugFlag 199" >> $TAOS_CFG echo "mDebugFlag 199" >> $TAOS_CFG echo "sdbDebugFlag 199" >> $TAOS_CFG -echo "rpcDebugFlag 135" >> $TAOS_CFG +echo "rpcDebugFlag 151" >> $TAOS_CFG echo "tmrDebugFlag 131" >> $TAOS_CFG echo "cDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 135" >> $TAOS_CFG diff --git a/tests/script/test.sh b/tests/script/test.sh index 743597eabd1a301cc45990bce12f5727f18042e0..8e3959a680b8e3d908645802db2d93c946aa4d62 100755 --- a/tests/script/test.sh +++ b/tests/script/test.sh @@ -99,7 +99,8 @@ echo "cDebugFlag 135" >> $TAOS_CFG echo "httpDebugFlag 135" >> $TAOS_CFG echo "monitorDebugFlag 135" >> $TAOS_CFG echo "udebugFlag 135" >> $TAOS_CFG -echo "clog 0" >> $TAOS_CFG +echo "tablemetakeeptimer 5" >> $TAOS_CFG +echo "wal 0" >> $TAOS_CFG echo "asyncLog 0" >> $TAOS_CFG echo "locale en_US.UTF-8" >> $TAOS_CFG echo " " >> $TAOS_CFG diff --git a/tests/script/unique/account/account_create.sim b/tests/script/unique/account/account_create.sim index d1d5ebece037cec40f895ca1e3f02e2991c27923..1966e1a7ce745bcbc8760a703b1b30824dea67a2 100644 --- a/tests/script/unique/account/account_create.sim +++ b/tests/script/unique/account/account_create.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect print ============================ dnode1 start diff --git a/tests/script/unique/account/account_delete.sim b/tests/script/unique/account/account_delete.sim index 71a75a7b350ce01ca7f0d2fac5e6d1ad3e7be0e9..40075dc1a6dc314b5b309670d6a51e89f949e84e 100644 --- a/tests/script/unique/account/account_delete.sim +++ b/tests/script/unique/account/account_delete.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect print ============= step1 sql create account oroot pass 'taosdata' diff --git a/tests/script/unique/account/account_len.sim b/tests/script/unique/account/account_len.sim index ae2db26d58f5a51916eca2c7bd7ac675108ffbd5..81d0f0bfb134a4282823b40602bf53583ea17d90 100644 --- a/tests/script/unique/account/account_len.sim +++ b/tests/script/unique/account/account_len.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 
-c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect $i = 0 $dbPrefix = aldb diff --git a/tests/script/unique/account/authority.sim b/tests/script/unique/account/authority.sim index fe978a5827c89d6a4b6aa2fa2c9b5fdadb4b7c5e..2bb61cb0f28a180ef1960943717f0db2136b17ec 100644 --- a/tests/script/unique/account/authority.sim +++ b/tests/script/unique/account/authority.sim @@ -4,8 +4,8 @@ system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect print ============= step1 diff --git a/tests/script/unique/account/basic.sim b/tests/script/unique/account/basic.sim index d06f7a15d9dd56b9f161d44f32c57e0f8156cb8f..adbc84d76679fee4ccd49f6951edb11c3b7df670 100644 --- a/tests/script/unique/account/basic.sim +++ b/tests/script/unique/account/basic.sim @@ -1,6 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect print =============== show accounts diff --git a/tests/script/unique/account/paras.sim b/tests/script/unique/account/paras.sim index b2e540400c0c5ab5849c0816a439ede7664350af..70a423f70f583734d69f6bda59f98a9a4ae4c3eb 100644 --- a/tests/script/unique/account/paras.sim +++ b/tests/script/unique/account/paras.sim @@ -1,6 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect print =============== show accounts diff --git a/tests/script/unique/account/pass_alter.sim b/tests/script/unique/account/pass_alter.sim index 936611833a83148378802bbca3c7c14c1df56915..644548be0bd49da1d015cd480b5b88435fbbe588 100644 --- a/tests/script/unique/account/pass_alter.sim +++ b/tests/script/unique/account/pass_alter.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect print ============= step1 sql create user read pass 'taosdata1' diff --git a/tests/script/unique/account/pass_len.sim b/tests/script/unique/account/pass_len.sim index f4f9c0496421d5e0c9c7a2545f6eedc36d5f9e06..370ea94200b9aa860af69ac360d3058553e8cb74 100644 --- a/tests/script/unique/account/pass_len.sim +++ b/tests/script/unique/account/pass_len.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect $i = 0 $dbPrefix = apdb diff --git a/tests/script/unique/account/usage.sim b/tests/script/unique/account/usage.sim index f022380ac416b41b43de098d2c27a37bd7df0d24..63c3d49f43e0d41f64bb6e011b5071cd893ec3ef 100644 --- a/tests/script/unique/account/usage.sim +++ b/tests/script/unique/account/usage.sim @@ -1,6 +1,7 @@ system sh/stop_dnodes.sh system sh/deploy.sh -n dnode1 -i 1 system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect print =============== show accounts diff --git a/tests/script/unique/account/user_create.sim b/tests/script/unique/account/user_create.sim index 789035cfcca728df07853f2aac6ec333adb926cb..2a647862252f6d00e20c95f60ef49a66adc3375e 100644 --- a/tests/script/unique/account/user_create.sim +++ b/tests/script/unique/account/user_create.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect print =============== step1 sql show users diff --git 
a/tests/script/unique/account/user_len.sim b/tests/script/unique/account/user_len.sim index 918551fa66375291f0cdee31af2317a4ade76621..1163ac2e4be84b974a30997a38ab9cbb9db1c696 100644 --- a/tests/script/unique/account/user_len.sim +++ b/tests/script/unique/account/user_len.sim @@ -3,8 +3,8 @@ system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c wallevel -v 0 system sh/exec_up.sh -n dnode1 -s start -sql connect sleep 3000 +sql connect $i = 0 $dbPrefix = lm_us_db diff --git a/tests/script/unique/big/balance.sim b/tests/script/unique/big/balance.sim index 52199f046e1c44d15957cda38003c62215779a97..4b0a6cd0bf25f1ad4a279ab9150adf1f114b3b60 100644 --- a/tests/script/unique/big/balance.sim +++ b/tests/script/unique/big/balance.sim @@ -20,7 +20,7 @@ system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4 system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 1000 print =============== prepare data -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start sleep 3000 sql connect @@ -87,7 +87,7 @@ endi print ========== step1 sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show1: @@ -98,12 +98,12 @@ show1: endi sql show dnodes -print dnode1 freeVnodes $data3_192.168.0.1 -print dnode2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 2 then +print dnode1 openvnodes $data2_1 +print dnode2 openvnodes $data2_2 +if $data2_1 != 2 then goto show1 endi -if $data3_192.168.0.2 != 2 then +if $data2_2 != 2 then goto show1 endi @@ -139,7 +139,7 @@ endi print ========== step2 sql create dnode $hostname3 -system sh/exec.sh -n dnode3 -s start +system sh/exec_up.sh -n dnode3 -s start sleep 10000 print ========== step3 @@ -154,20 +154,20 @@ show3: endi sql show dnodes -print dnode1 freeVnodes $data3_192.168.0.1 -print dnode2 freeVnodes $data3_192.168.0.2 -print dnode3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 2 then +print dnode1 freeVnodes $data2_1 +print dnode2 freeVnodes $data2_2 +print dnode3 freeVnodes $data2_3 +if $data2_1 != 2 then goto show3 endi -if $data3_192.168.0.2 != NULL then +if $data2_2 != NULL then goto show3 endi -if $data3_192.168.0.3 != 2 then +if $data2_3 != 2 then goto show3 endi -system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT sql select count(*) from t10 print select count(*) from t10 $data00 expect $rowNum @@ -211,16 +211,16 @@ show4: endi sql show dnodes -print dnode1 freeVnodes $data3_192.168.0.1 -print dnode3 freeVnodes $data3_192.168.0.3 -if $data3_192.168.0.1 != 0 then +print dnode1 freeVnodes $data2_1 +print dnode3 freeVnodes $data2_3 +if $data2_1 != 0 then goto show4 endi -if $data3_192.168.0.3 != NULL then +if $data2_3 != NULL then goto show4 endi -system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT sql select count(*) from t10 print select count(*) from t10 $data00 expect $rowNum @@ -255,7 +255,7 @@ endi print ========== step5 sql alter database db replica 2 sql create dnode $hostname4 -system sh/exec.sh -n dnode4 -s start +system sh/exec_up.sh -n dnode4 -s start $x = 0 show5: @@ -266,12 +266,12 @@ show5: endi sql show dnodes -print dnode1 freeVnodes $data3_192.168.0.1 -print dnode4 freeVnodes $data3_192.168.0.4 -if $data3_192.168.0.1 != 0 then +print dnode1 freeVnodes $data2_1 +print dnode4 freeVnodes $data2_4 +if $data2_1 != 0 then goto show5 endi -if $data3_192.168.0.4 != 0 then +if $data2_4 != 0 then goto show5 endi diff --git a/tests/script/unique/big/maxvnodes.sim 
b/tests/script/unique/big/maxvnodes.sim index 00995ba32c4e57f957557bf1ac72423244c75192..3015d07b6dfdca7cc67e1621c096d8238b063129 100644 --- a/tests/script/unique/big/maxvnodes.sim +++ b/tests/script/unique/big/maxvnodes.sim @@ -1,25 +1,32 @@ system sh/stop_dnodes.sh +$totalVnodes = 100 +$minVnodes = 48 +$maxVnodes = 52 +$maxTables = 4 +$totalRows = $totalVnodes * $maxTables + system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 0 -system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 100 -system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 256 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v $maxTables +system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v $totalVnodes system sh/cfg.sh -n dnode1 -c maxVnodeConnections -v 100000 system sh/cfg.sh -n dnode1 -c maxMeterConnections -v 100000 system sh/cfg.sh -n dnode1 -c maxShellConns -v 100000 system sh/cfg.sh -n dnode1 -c maxMgmtConnections -v 100000 print ========== prepare data -system sh/exec.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect -sql create database db +sql create database db blocks 2 cache 1 maxTables $maxTables sql use db print ========== step1 sql create table mt (ts timestamp, tbcol int) TAGS(tgcol int) $x = 0 -while $x < 25600 +while $x < $totalRows $tb = t . $x sql create table $tb using mt tags( $x ) sql insert into $tb values (now, $x ) @@ -28,16 +35,15 @@ endw print ========== step2 sql select * from mt -if $rows != 25600 then +if $rows != $totalRows then return -1 endi sql select count(*) from mt -if $data00 != 25600 then +if $data00 != $totalRows then return -1 endi - system sh/deploy.sh -n dnode2 -i 2 system sh/cfg.sh -n dnode2 -c walLevel -v 0 system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 100 @@ -49,7 +55,7 @@ system sh/cfg.sh -n dnode2 -c maxMgmtConnections -v 100000 print ========== step2 sql create dnode $hostname2 -system sh/exec.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start $x = 0 show3: @@ -60,23 +66,23 @@ show3: endi sql show dnodes -print dnode1 freeVnodes $data3_192.168.0.1 -print dnode2 freeVnodes $data3_192.168.0.2 -if $data3_192.168.0.1 != 126 then +print dnode1 openvnodes $data2_1 +print dnode2 openvnodes $data2_2 +if $data2_1 != $minVnodes then goto show3 endi -if $data3_192.168.0.2 != 130 then +if $data2_2 != $maxVnodes then goto show3 endi print ========== step4 sql select * from mt -if $rows != 25600 then +if $rows != $totalRows then return -1 endi sql select count(*) from mt -if $data00 != 25600 then +if $data00 != $totalRows then return -1 endi diff --git a/tests/script/unique/big/tcp.sim b/tests/script/unique/big/tcp.sim index 1fb414ec748a290db1bd6ec5987ab335f3aa1c20..590035c0f8afa38e57248e586ebb361e71b2da3f 100644 --- a/tests/script/unique/big/tcp.sim +++ b/tests/script/unique/big/tcp.sim @@ -14,8 +14,8 @@ system sh/cfg.sh -n dnode1 -c adminDebugFlag -v 131 system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135 system sh/cfg.sh -n dnode1 -c debugFlag -v 131 -system sh/exec.sh -n dnode1 -s start - +system sh/exec_up.sh -n dnode1 -s start +sleep 3000 sql connect print ======================== dnode1 start @@ -52,7 +52,7 @@ while $i < $tbNum endw $i = 0 -while $i < 100 +while $i < 10 print =============== step3 $i sql select count(*) from $mt print ===> $data00 $data01 @@ -83,7 +83,7 @@ while $i < $tbNum endw $i = 0 -while $i < 100 +while $i < 10 print =============== step5 $i sql select count(*) from $mt where tgcol < 20200 print ===> $data00 $data01 diff --git a/tests/script/unique/mnode/mgmt22.sim 
b/tests/script/unique/mnode/mgmt22.sim index 37e38fbf66c7a31fabf8bcb229967f20c4196ab9..e4e1235e66fd15c16488d46be11977280672cb54 100644 --- a/tests/script/unique/mnode/mgmt22.sim +++ b/tests/script/unique/mnode/mgmt22.sim @@ -8,7 +8,7 @@ system sh/cfg.sh -n dnode2 -c numOfMPeers -v 2 system sh/cfg.sh -n dnode3 -c numOfMPeers -v 2 print ============== step1 -system sh/exec_up.sh -n dnode1 -s start +system sh/exec_up.sh -n dnode1 -s start -t sleep 3000 sql connect @@ -20,7 +20,7 @@ if $data2_1 != master then endi print ============== step2 -system sh/exec_up.sh -n dnode2 -s start +system sh/exec_up.sh -n dnode2 -s start -t sql create dnode $hostname2 $x = 0 @@ -41,6 +41,17 @@ if $data2_2 != slave then goto show2 endi +system sh/exec_up.sh -n dnode1 -s stop -x SIGINT +system sh/exec_up.sh -n dnode2 -s stop -x SIGINT +system sh/exec_up.sh -n dnode3 -s stop -x SIGINT +system sh/exec_up.sh -n dnode4 -s stop -x SIGINT +system sh/exec_up.sh -n dnode5 -s stop -x SIGINT +system sh/exec_up.sh -n dnode6 -s stop -x SIGINT +system sh/exec_up.sh -n dnode7 -s stop -x SIGINT +system sh/exec_up.sh -n dnode8 -s stop -x SIGINT + +return + print ============== step3 sql_error drop dnode $hostname1 -x error1 print should not drop master diff --git a/tests/script/unique/mnode/mgmt23.sim b/tests/script/unique/mnode/mgmt23.sim index 99d91595735e5b71d480b9bb0514808e3c8d7c86..8c144dd0c44e23dc381ead7aa3ff32815965b6e1 100644 --- a/tests/script/unique/mnode/mgmt23.sim +++ b/tests/script/unique/mnode/mgmt23.sim @@ -59,7 +59,7 @@ endi if $dnode2Role != slave then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi @@ -78,7 +78,7 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then @@ -106,7 +106,7 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then diff --git a/tests/script/unique/mnode/mgmt25.sim b/tests/script/unique/mnode/mgmt25.sim index ba545f94797261c8f394128027cba86718e0b1cc..8c8eeba466c31ab6efabd00e129a78d13c13ecd3 100644 --- a/tests/script/unique/mnode/mgmt25.sim +++ b/tests/script/unique/mnode/mgmt25.sim @@ -59,7 +59,7 @@ endi if $dnode2Role != slave then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi @@ -78,7 +78,7 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then diff --git a/tests/script/unique/mnode/mgmt26.sim b/tests/script/unique/mnode/mgmt26.sim index e5ead5bb3915d950ce5cd9999867647f671a85a2..373a0abaed191075666085574dc013c3f1cb0721 100644 --- a/tests/script/unique/mnode/mgmt26.sim +++ b/tests/script/unique/mnode/mgmt26.sim @@ -59,7 +59,7 @@ endi if $dnode2Role != slave then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi @@ -79,7 +79,7 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then @@ -106,7 +106,7 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then diff --git a/tests/script/unique/mnode/mgmt33.sim b/tests/script/unique/mnode/mgmt33.sim index 
36b7ba81394d6055305f7b7dabf6b504e140720c..e3a62a2d22cae72ef7a12d9f85dc5da1046a88a3 100644 --- a/tests/script/unique/mnode/mgmt33.sim +++ b/tests/script/unique/mnode/mgmt33.sim @@ -18,10 +18,10 @@ print dnode3 ==> $data3_3 if $data2_1 != master then return -1 endi -if $data3_2 != NULL then +if $data3_2 != null then return -1 endi -if $data3_3 != NULL then +if $data3_3 != null then return -1 endi @@ -44,7 +44,7 @@ endi if $dnode2Role != slave then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi @@ -86,7 +86,7 @@ print dnode3 ==> $dnode3Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then @@ -156,7 +156,7 @@ print dnode1 ==> $dnode1Role print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role -if $dnode1Role != NULL then +if $dnode1Role != null then return -1 endi #if $dnode2Role != master then diff --git a/tests/script/unique/mnode/mgmt34.sim b/tests/script/unique/mnode/mgmt34.sim index 1139daea1cccfb87962aebc3a9fec1c63f2cd53b..4409c825c8138420ab853a787b8d8041bf544584 100644 --- a/tests/script/unique/mnode/mgmt34.sim +++ b/tests/script/unique/mnode/mgmt34.sim @@ -22,10 +22,10 @@ print dnode3 ==> $data3_3 if $data2_1 != master then return -1 endi -if $data3_2 != NULL then +if $data3_2 != null then return -1 endi -if $data3_3 != NULL then +if $data3_3 != null then return -1 endi @@ -50,10 +50,10 @@ endi if $dnode2Role != slave then return -1 endi -if $dnode3Role != NULL then +if $dnode3Role != null then return -1 endi -if $dnode4Role != NULL then +if $dnode4Role != null then return -1 endi @@ -81,7 +81,7 @@ endi if $dnode3Role != slave then return -1 endi -if $dnode4Role != NULL then +if $dnode4Role != null then return -1 endi @@ -110,7 +110,7 @@ endi if $dnode3Role != slave then return -1 endi -if $dnode4Role != NULL then +if $dnode4Role != null then return -1 endi @@ -131,7 +131,7 @@ print dnode4 ==> $dnode4Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then @@ -165,7 +165,7 @@ print dnode4 ==> $dnode4Role if $dnode1Role != master then return -1 endi -if $dnode2Role != NULL then +if $dnode2Role != null then return -1 endi if $dnode3Role != slave then @@ -207,7 +207,7 @@ print dnode2 ==> $dnode2Role print dnode3 ==> $dnode3Role print dnode4 ==> $dnode4Role -if $dnode1Role != NULL then +if $dnode1Role != null then return -1 endi if $dnode2Role != slave then diff --git a/tests/test-all.sh b/tests/test-all.sh index 93bfa7a4263e70f01ce206efa4a9e8a8b6674fc8..e58a6f51327c066b526d0332a2c7b1678d7cc4d4 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -21,11 +21,12 @@ fi echo -e "${GREEN} ### Total $totalSuccess TSIM case(s) succeed! ### ${NC}" totalFailed=`grep 'failed\|fault' out.txt | wc -l` -echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}" +# echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}" if [ "$totalFailed" -ne "0" ]; then -# echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}" - exit $totalFailed + echo -e "${RED} ### Total $totalFailed TSIM case(s) failed! ### ${NC}" + +# exit $totalFailed fi echo "### run Python script ###" @@ -46,6 +47,7 @@ fi totalPyFailed=`grep 'failed\|fault' pytest-out.txt | wc -l` if [ "$totalPyFailed" -ne "0" ]; then echo -e "${RED} ### Total $totalPyFailed python case(s) failed! 
### ${NC}" - exit $totalPyFailed +# exit $totalPyFailed fi +exit $(($totalFailed + $totalPyFailed))