diff --git a/cmake/define.inc b/cmake/define.inc
index da100f4260c7f824f2b2d3f2ba562d7ea03e4f51..93bf6026106e4e2c6e788d2949446ab54b26813f 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -24,3 +24,11 @@ ENDIF ()
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
+
+IF (TD_RANDOM_FILE_FAIL)
+ ADD_DEFINITIONS(-DTAOS_RANDOM_FILE_FAIL)
+ENDIF ()
+
+IF (TD_RANDOM_NETWORK_FAIL)
+ ADD_DEFINITIONS(-DTAOS_RANDOM_NETWORK_FAIL)
+ENDIF ()
diff --git a/cmake/input.inc b/cmake/input.inc
index 5a17e0319c01d335e5940e0475db4f2f57d7bfd7..e963e202400aa759962bd300138cca3b04962dc6 100755
--- a/cmake/input.inc
+++ b/cmake/input.inc
@@ -30,4 +30,14 @@ ENDIF ()
IF (${MEM_CHECK} MATCHES "true")
SET(TD_MEM_CHECK TRUE)
MESSAGE(STATUS "build with memory check")
-ENDIF ()
\ No newline at end of file
+ENDIF ()
+
+IF (${RANDOM_FILE_FAIL} MATCHES "true")
+ SET(TD_RANDOM_FILE_FAIL TRUE)
+ MESSAGE(STATUS "build with random-file-fail enabled")
+ENDIF ()
+
+IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
+ SET(TD_RANDOM_NETWORK_FAIL TRUE)
+ MESSAGE(STATUS "build with random-network-fail enabled")
+ENDIF ()
diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h
index b99a8a46d0874ee57d338389bde864fc1c9ae514..f687d7f244a42e255817a278e6169ce294788a38 100644
--- a/src/client/inc/tscUtil.h
+++ b/src/client/inc/tscUtil.h
@@ -176,8 +176,6 @@ SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);
-SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
-
int32_t tscValidateName(SSQLToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h
index 6ce94d5aa4d10688d7f92dfe2c9ccbfc8852bc9d..c8754e5bebd7a460ed3e33a5e56a1e535dbf7035 100644
--- a/src/client/inc/tsclient.h
+++ b/src/client/inc/tsclient.h
@@ -32,8 +32,8 @@ extern "C" {
#include "qExecutor.h"
#include "qsqlparser.h"
-#include "qsqltype.h"
#include "qtsbuf.h"
+#include "tcmdtype.h"
// forward declaration
struct SSqlInfo;
@@ -395,7 +395,6 @@ TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port,
void *param, void **taos);
void waitForQueryRsp(void *param, TAOS_RES *tres, int code) ;
-int doAsyncParseSql(SSqlObj* pSql);
void doAsyncQuery(STscObj *pObj, SSqlObj *pSql, void (*fp)(), void *param, const char *sqlstr, size_t sqlLen);
void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql);
@@ -403,13 +402,14 @@ void tscKillSTableQuery(SSqlObj *pSql);
void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen);
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
+
+// todo remove this function.
bool tscResultsetFetchCompleted(TAOS_RES *result);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
-void tscQueueAsyncFreeResult(SSqlObj *pSql);
int32_t tscToSQLCmd(SSqlObj *pSql, struct SSqlInfo *pInfo);
void tscGetResultColumnChr(SSqlRes *pRes, SFieldInfo* pFieldInfo, int32_t column);
diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c
index 2de45bcc6e0a0be3f41f492d0a0b760c7585d3a4..3fed3e4d6719d508ec629a215b3d0033f2a6eb27 100644
--- a/src/client/src/tscAsync.c
+++ b/src/client/src/tscAsync.c
@@ -213,27 +213,34 @@ void taos_fetch_rows_a(TAOS_RES *taosa, void (*fp)(void *, TAOS_RES *, int), voi
// handle the sub queries of join query
if (pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
tscFetchDatablockFromSubquery(pSql);
- } else if (pRes->completed && pCmd->command == TSDB_SQL_FETCH) {
- if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
- tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
- return;
- } else {
- /*
+ } else if (pRes->completed) {
+ if(pCmd->command == TSDB_SQL_FETCH) {
+ if (hasMoreVnodesToTry(pSql)) { // sequentially retrieve data from remain vnodes.
+ tscTryQueryNextVnode(pSql, tscAsyncQueryRowsForNextVnode);
+ return;
+ } else {
+ /*
* all available virtual node has been checked already, now we need to check
* for the next subclause queries
- */
- if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
- tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
- return;
- }
-
- /*
+ */
+ if (pCmd->clauseIndex < pCmd->numOfClause - 1) {
+ tscTryQueryNextClause(pSql, tscAsyncQueryRowsForNextVnode);
+ return;
+ }
+
+ /*
* 1. has reach the limitation
* 2. no remain virtual nodes to be retrieved anymore
- */
+ */
+ (*pSql->fetchFp)(param, pSql, 0);
+ }
+ return;
+ } else if (pCmd->command == TSDB_SQL_RETRIEVE || pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE) {
+ // in case of show command, return no data
(*pSql->fetchFp)(param, pSql, 0);
+ } else {
+ assert(0);
}
- return;
} else { // current query is not completed, continue retrieve from node
if (pCmd->command != TSDB_SQL_RETRIEVE_LOCALMERGE && pCmd->command < TSDB_SQL_LOCAL) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
@@ -405,17 +412,6 @@ void tscProcessAsyncFree(SSchedMsg *pMsg) {
taos_free_result(pSql);
}
-void tscQueueAsyncFreeResult(SSqlObj *pSql) {
- tscDebug("%p sqlObj put in queue to async free", pSql);
-
- SSchedMsg schedMsg = { 0 };
- schedMsg.fp = tscProcessAsyncFree;
- schedMsg.ahandle = pSql;
- schedMsg.thandle = (void *)1;
- schedMsg.msg = NULL;
- taosScheduleTask(tscQhandle, &schedMsg);
-}
-
int tscSendMsgToServer(SSqlObj *pSql);
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 83700ce0a573ccf15a474a58ec4ebfda2634e2fd..1d66fb046745af172fdd190732d2e5200f250b0f 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -406,7 +406,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pSql->res.qhandle = 0x1;
pSql->res.numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle,false);
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
tscProcessServerVer(pSql);
} else if (pCmd->command == TSDB_SQL_CLI_VERSION) {
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 9f557f5529e03ee504aea2c30cc6bfac113a06aa..2b325afa7cb52c94404f2b2b4b49a4390c216d8c 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -62,34 +62,34 @@ static int32_t setObjFullName(char* fullName, const char* account, SSQLToken* pD
static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength);
static void getRevisedName(char* resultFieldName, int32_t functionId, int32_t maxLen, char* columnName);
-static int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult);
+static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult);
static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes,
int8_t type, char* fieldName, SSqlExpr* pSqlExpr);
static int32_t changeFunctionID(int32_t optr, int16_t* functionId);
static int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSelection, bool isSTable);
static bool validateIpAddress(const char* ip, size_t size);
-static bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo);
+static bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static bool functionCompatibleCheck(SQueryInfo* pQueryInfo);
static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd);
-static int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
-static int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
-static int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
+static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql);
-static int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL);
-static int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema);
+static int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL);
+static int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema);
-static int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo);
+static int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo);
-static int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo);
+static int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
static int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString);
-static int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo);
-static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
+static int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
+static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type);
static int32_t validateDNodeConfig(tDCLSQL* pOptions);
static int32_t validateLocalConfig(tDCLSQL* pOptions);
static int32_t validateColumnName(char* name);
@@ -98,15 +98,15 @@ static int32_t setKillInfo(SSqlObj* pSql, struct SSqlInfo* pInfo, int32_t killTy
static bool validateOneTags(SSqlCmd* pCmd, TAOS_FIELD* pTagField);
static bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo);
-static int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
+static int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t index, SQuerySQL* pQuerySql, SSqlObj* pSql);
static int32_t parseCreateDBOptions(SSqlCmd* pCmd, SCreateDBInfo* pCreateDbSql);
-static int32_t getColumnIndexByName(const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
+static int32_t getColumnIndexByName(SSqlCmd* pCmd, const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t optrToString(tSQLExpr* pExpr, char** exprString);
static int32_t getTableIndexImpl(SSQLToken* pTableToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex);
static int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo);
-static int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
+static int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCMCreateDbMsg* pCreate);
static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t columnIndex);
@@ -115,7 +115,7 @@ static int32_t doCheckForCreateTable(SSqlObj* pSql, int32_t subClauseIndex, SSql
static int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo);
static int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index);
-static int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols);
+static int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols);
/*
* Used during parsing query sql. Since the query sql usually small in length, error position
@@ -125,7 +125,7 @@ static int32_t invalidSqlErrMsg(char* dstBuffer, const char* errMsg) {
return tscInvalidSQLErrMsg(dstBuffer, errMsg, NULL);
}
-static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVar) {
+static int setColumnFilterInfoForTimestamp(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tVariant* pVar) {
int64_t time = 0;
const char* msg = "invalid timestamp";
@@ -137,11 +137,11 @@ static int setColumnFilterInfoForTimestamp(SQueryInfo* pQueryInfo, tVariant* pVa
if (seg != NULL) {
if (taosParseTime(pVar->pz, &time, pVar->nLen, tinfo.precision, tsDaylight) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
} else {
if (tVariantDump(pVar, (char*)&time, TSDB_DATA_TYPE_BIGINT, true)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
@@ -563,7 +563,7 @@ static bool isTopBottomQuery(SQueryInfo* pQueryInfo) {
return false;
}
-int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "invalid query expression";
const char* msg2 = "interval cannot be less than 10 ms";
@@ -590,12 +590,12 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
// interval cannot be less than 10 milliseconds
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
if (isTopBottomQuery(pQueryInfo)) {
- if (parseSlidingClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -610,7 +610,7 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
for (int32_t i = 0; i < size; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
if (pExpr->functionId == TSDB_FUNC_COUNT && TSDB_COL_IS_TAG(pExpr->colInfo.flag)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -619,7 +619,7 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
* select tbname, tags_fields from super_table_name interval(1s)
*/
if (tscQueryTags(pQueryInfo) && pQueryInfo->intervalTime > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// need to add timestamp column in result set, if interval is existed
@@ -644,14 +644,14 @@ int32_t parseIntervalClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
- if (parseSlidingClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
return TSDB_CODE_SUCCESS;
}
-int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
@@ -666,11 +666,11 @@ int32_t parseSlidingClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
}
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
@@ -1124,12 +1124,12 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_PROJECTION_QUERY);
// select table_name1.field_name1, table_name2.field_name2 from table_name1, table_name2
- if (addProjectionExprAndResultField(pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
+ if (addProjectionExprAndResultField(pCmd, pQueryInfo, pItem) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
} else if (pItem->pNode->nSQLOptr >= TK_COUNT && pItem->pNode->nSQLOptr <= TK_TBID) {
// sql function in selection clause, append sql function info in pSqlCmd structure sequentially
- if (addExprAndResultField(pQueryInfo, outputIndex, pItem, true) != TSDB_CODE_SUCCESS) {
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, pItem, true) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1138,8 +1138,8 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
SColumnList columnList = {0};
int32_t arithmeticType = NON_ARITHMEIC_EXPR;
- if (validateArithmeticSQLExpr(pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (validateArithmeticSQLExpr(pCmd, pItem->pNode, pQueryInfo, &columnList, &arithmeticType) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t tableIndex = columnList.ids[0].tableIndex;
@@ -1152,7 +1152,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
// all columns in arithmetic expression must belong to the same table
for (int32_t f = 1; f < columnList.num; ++f) {
if (columnList.ids[f].tableIndex != tableIndex) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
@@ -1172,10 +1172,10 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
tExprNode* pNode = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
- int32_t ret = exprTreeFromSqlExpr(&pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pNode, pItem->pNode, pQueryInfo->exprList, pQueryInfo, colList);
if (ret != TSDB_CODE_SUCCESS) {
tExprTreeDestroy(&pNode, NULL);
- return invalidSqlErrMsg(pQueryInfo->msg, "invalid arithmetic expression in select clause");
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid arithmetic expression in select clause");
}
SBufferWriter bw = tbufInitWriter(NULL, false);
@@ -1215,10 +1215,10 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
pArithExprInfo->interBytes = sizeof(double);
pArithExprInfo->type = TSDB_DATA_TYPE_DOUBLE;
- int32_t ret = exprTreeFromSqlExpr(&pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pArithExprInfo->pExpr, pItem->pNode, pQueryInfo->exprList, pQueryInfo, NULL);
if (ret != TSDB_CODE_SUCCESS) {
tExprTreeDestroy(&pArithExprInfo->pExpr, NULL);
- return invalidSqlErrMsg(pQueryInfo->msg, "invalid expression in select clause");
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), "invalid expression in select clause");
}
pInfo->pArithExprInfo = pArithExprInfo;
@@ -1229,7 +1229,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
* not support such expression
* e.g., select 12+5 from table_name
*/
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pQueryInfo->fieldsInfo.numOfOutput > TSDB_MAX_COLUMNS) {
@@ -1238,7 +1238,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
}
if (!functionCompatibleCheck(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
/*
@@ -1248,7 +1248,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel
if (isSTable) {
tscTansformSQLFuncForSTableQuery(pQueryInfo);
- if (hasUnsupportFunctionsForSTableQuery(pQueryInfo)) {
+ if (hasUnsupportFunctionsForSTableQuery(pCmd, pQueryInfo)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -1373,7 +1373,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum
return numOfTotalColumns;
}
-int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
+int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem) {
const char* msg0 = "invalid column name";
const char* msg1 = "tag for normal table query is not allowed";
@@ -1382,7 +1382,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
if (pItem->pNode->nSQLOptr == TK_ALL) { // project on all fields
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
if (getTableIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
// all meters columns are required
@@ -1398,8 +1398,8 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
} else if (pItem->pNode->nSQLOptr == TK_ID) { // simple column projection query
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ if (getColumnIndexByName(pCmd, &pItem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
@@ -1410,7 +1410,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) && UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
addProjectQueryCol(pQueryInfo, startPos, &index, pItem);
@@ -1422,7 +1422,7 @@ int32_t addProjectionExprAndResultField(SQueryInfo* pQueryInfo, tSQLExprItem* pI
return TSDB_CODE_SUCCESS;
}
-static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
+static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSchema* pSchema, int32_t functionID, char* aliasName,
int32_t resColIdx, SColumnIndex* pColIndex) {
int16_t type = 0;
int16_t bytes = 0;
@@ -1434,7 +1434,7 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
if (pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BINARY ||
pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_NCHAR ||
pSchema[pColIndex->columnIndex].type == TSDB_DATA_TYPE_BOOL) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return -1;
} else {
type = TSDB_DATA_TYPE_DOUBLE;
@@ -1471,7 +1471,7 @@ static int32_t setExprInfoForFunctions(SQueryInfo* pQueryInfo, SSchema* pSchema,
return TSDB_CODE_SUCCESS;
}
-int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) {
+int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) {
STableMetaInfo* pTableMetaInfo = NULL;
int32_t optr = pItem->pNode->nSQLOptr;
@@ -1489,7 +1489,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
case TK_COUNT: {
if (pItem->pNode->pParam != NULL && pItem->pNode->pParam->nExpr != 1) {
/* more than one parameter for count() function */
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
int16_t functionID = 0;
@@ -1503,7 +1503,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
if (pItem->pNode->pParam != NULL) {
SSQLToken* pToken = &pItem->pNode->pParam->a[0].pNode->colInfo;
if (pToken->z == NULL || pToken->n == 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0];
@@ -1513,7 +1513,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSQLToken tmpToken = pParamElem->pNode->colInfo;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
index = (SColumnIndex){0, PRIMARYKEY_TIMESTAMP_COL_INDEX};
@@ -1521,8 +1521,8 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, TSDB_DATA_TYPE_BIGINT, size, size, false);
} else {
// count the number of meters created according to the super table
- if (getColumnIndexByName(pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, pToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1583,18 +1583,18 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
if (pItem->pNode->pParam == NULL || (optr != TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 1) ||
(optr == TK_LEASTSQUARES && pItem->pNode->pParam->nExpr != 3)) {
/* no parameters or more than one parameter for function */
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]);
if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if ((getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) ||
+ if ((getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) ||
index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// 2. check if sql function can be applied on this column data type
@@ -1603,7 +1603,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int16_t colType = pSchema->type;
if (colType <= TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int16_t resultType = 0;
@@ -1633,7 +1633,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to tags
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, functionID, &index, resultType, resultSize, resultSize, false);
@@ -1685,23 +1685,23 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int16_t functionID = 0;
if (changeFunctionID(optr, &functionID) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
if (!requireAllFields) {
if (pItem->pNode->pParam->nExpr < 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pItem->pNode->pParam->nExpr > 1 && (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
/* in first/last function, multiple columns can be add to resultset */
for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) {
tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]);
if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
@@ -1711,7 +1711,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
SSQLToken tmpToken = pParamElem->pNode->colInfo;
if (getTableIndexByName(&tmpToken, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1719,14 +1719,14 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
for (int32_t j = 0; j < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++j) {
index.columnIndex = j;
- if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex++, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
} else {
- if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1734,10 +1734,10 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to tags
if ((index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) || (index.columnIndex < 0)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
- if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i, &index) != 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1765,7 +1765,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// multicolumn selection does not support alias name
if (pItem->aliasName != NULL && strlen(pItem->aliasName) > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
for (int32_t j = 0; j < pQueryInfo->numOfTables; ++j) {
@@ -1774,7 +1774,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
for (int32_t i = 0; i < tscGetNumOfColumns(pTableMetaInfo->pTableMeta); ++i) {
SColumnIndex index = {.tableIndex = j, .columnIndex = i};
- if (setExprInfoForFunctions(pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i + j, &index) !=
+ if (setExprInfoForFunctions(pCmd, pQueryInfo, pSchema, functionID, pItem->aliasName, colIndex + i + j, &index) !=
0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -1794,17 +1794,17 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// 1. valid the number of parameters
if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 2) {
/* no parameters or more than one parameter for function */
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]);
if (pParamElem->pNode->nSQLOptr != TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, &pParamElem->pNode->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1812,18 +1812,18 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to tags
if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
// 2. valid the column type
int16_t colType = pSchema[index.columnIndex].type;
if (colType == TSDB_DATA_TYPE_BOOL || colType >= TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// 3. valid the parameters
if (pParamElem[1].pNode->nSQLOptr == TK_ID) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tVariant* pVariant = &pParamElem[1].pNode->val;
@@ -1839,7 +1839,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
double dp = GET_DOUBLE_VAL(val);
if (dp < 0 || dp > TOP_BOTTOM_QUERY_LIMIT) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
resultSize = sizeof(double);
@@ -1862,7 +1862,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
int64_t nTop = GET_INT32_VAL(val);
if (nTop <= 0 || nTop > 100) { // todo use macro
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
int16_t functionId = 0;
@@ -1906,19 +1906,19 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
case TK_TBID: {
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// no parameters or more than one parameter for function
if (pItem->pNode->pParam == NULL || pItem->pNode->pParam->nExpr != 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tSQLExpr* pParam = pItem->pNode->pParam->a[0].pNode;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ if (getColumnIndexByName(pCmd, &pParam->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -1927,7 +1927,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
// functions can not be applied to normal columns
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
if (index.columnIndex < numOfCols && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (index.columnIndex > 0) {
@@ -1943,7 +1943,7 @@ int32_t addExprAndResultField(SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExpr
}
if (colType == TSDB_DATA_TYPE_BOOL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
@@ -2036,7 +2036,7 @@ static int16_t doGetColumnIndex(SQueryInfo* pQueryInfo, int32_t index, SSQLToken
return columnIndex;
}
-int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t doGetColumnIndexByName(SSqlCmd* pCmd, SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
const char* msg0 = "ambiguous column name";
const char* msg1 = "invalid column name";
@@ -2052,7 +2052,7 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColum
if (colIndex != COLUMN_INDEX_INITIAL_VAL) {
if (pIndex->columnIndex != COLUMN_INDEX_INITIAL_VAL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
} else {
pIndex->tableIndex = i;
pIndex->columnIndex = colIndex;
@@ -2067,7 +2067,7 @@ int32_t doGetColumnIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColum
}
if (pIndex->columnIndex == COLUMN_INDEX_INITIAL_VAL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -2118,7 +2118,7 @@ int32_t getTableIndexByName(SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIn
return TSDB_CODE_SUCCESS;
}
-int32_t getColumnIndexByName(const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
+int32_t getColumnIndexByName(SSqlCmd* pCmd, const SSQLToken* pToken, SQueryInfo* pQueryInfo, SColumnIndex* pIndex) {
if (pQueryInfo->pTableMetaInfo == NULL || pQueryInfo->numOfTables == 0) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -2129,7 +2129,7 @@ int32_t getColumnIndexByName(const SSQLToken* pToken, SQueryInfo* pQueryInfo, SC
return TSDB_CODE_TSC_INVALID_SQL;
}
- return doGetColumnIndexByName(&tmpToken, pQueryInfo, pIndex);
+ return doGetColumnIndexByName(pCmd, &tmpToken, pQueryInfo, pIndex);
}
int32_t changeFunctionID(int32_t optr, int16_t* functionId) {
@@ -2412,7 +2412,7 @@ void tscRestoreSQLFuncForSTableQuery(SQueryInfo* pQueryInfo) {
}
}
-bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo) {
+bool hasUnsupportFunctionsForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg1 = "TWA not allowed to apply to super table directly";
const char* msg2 = "TWA only support group by tbname for super table query";
const char* msg3 = "function not support for super table query";
@@ -2422,24 +2422,24 @@ bool hasUnsupportFunctionsForSTableQuery(SQueryInfo* pQueryInfo) {
for (int32_t i = 0; i < size; ++i) {
int32_t functionId = tscSqlExprGet(pQueryInfo, i)->functionId;
if ((aAggs[functionId].nStatus & TSDB_FUNCSTATE_STABLE) == 0) {
- invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return true;
}
}
if (tscIsTWAQuery(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return true;
}
if (pQueryInfo->groupbyExpr.numOfGroupCols != 1) {
- invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
} else {
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (pColIndex->colIndex != TSDB_TBNAME_COLUMN_INDEX) {
- invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return true;
}
}
@@ -2506,7 +2506,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
pQueryInfo->groupbyExpr.numOfGroupCols = pList->nExpr;
if (pList->nExpr > TSDB_MAX_TAGS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STableMeta* pTableMeta = NULL;
@@ -2520,8 +2520,8 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
SSQLToken token = {pVar->nLen, pVar->nType, pVar->pz};
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ if (getColumnIndexByName(pCmd, &token, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
tableIndex = index.tableIndex;
@@ -2548,7 +2548,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
if (groupTag) {
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
int32_t relIndex = index.columnIndex;
@@ -2564,7 +2564,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
} else {
// check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by
if (pSchema->type > TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
tscColumnListInsert(pQueryInfo->colList, &index);
@@ -2574,7 +2574,7 @@ int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd*
pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC;
if (i == 0 && pList->nExpr > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
}
}
@@ -2610,7 +2610,7 @@ static SColumnFilterInfo* addColumnFilterInfo(SColumn* pColumn) {
return pColFilterInfo;
}
-static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
+static int32_t doExtractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnFilterInfo* pColumnFilter,
SColumnIndex* columnIndex, tSQLExpr* pExpr) {
const char* msg = "not supported filter condition";
@@ -2625,7 +2625,7 @@ static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterIn
} else if (colType == TSDB_DATA_TYPE_FLOAT || colType == TSDB_DATA_TYPE_DOUBLE) {
colType = TSDB_DATA_TYPE_DOUBLE;
} else if ((colType == TSDB_DATA_TYPE_TIMESTAMP) && (TSDB_DATA_TYPE_BINARY == pRight->val.nType)) {
- int retVal = setColumnFilterInfoForTimestamp(pQueryInfo, &pRight->val);
+ int retVal = setColumnFilterInfoForTimestamp(pCmd, pQueryInfo, &pRight->val);
if (TSDB_CODE_SUCCESS != retVal) {
return retVal;
}
@@ -2675,7 +2675,7 @@ static int32_t doExtractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnFilterIn
pColumnFilter->lowerRelOptr = TSDB_RELATION_LIKE;
break;
default:
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
return TSDB_CODE_SUCCESS;
@@ -2859,7 +2859,7 @@ enum {
TSQL_EXPR_TBNAME = 3,
};
-static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) {
+static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SColumnIndex* pIndex, tSQLExpr* pExpr, int32_t sqlOptr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex);
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
@@ -2895,22 +2895,22 @@ static int32_t extractColumnFilterInfo(SQueryInfo* pQueryInfo, SColumnIndex* pIn
if (pColFilter->filterstr) {
if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
if (pExpr->nSQLOptr == TK_LIKE) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pSchema->type == TSDB_DATA_TYPE_BOOL) {
if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
}
pColumn->colIndex = *pIndex;
- return doExtractColumnFilterInfo(pQueryInfo, pColFilter, pIndex, pExpr);
+ return doExtractColumnFilterInfo(pCmd, pQueryInfo, pColFilter, pIndex, pExpr);
}
static void relToString(tSQLExpr* pExpr, char** str) {
@@ -2957,7 +2957,7 @@ static int32_t getTagCondString(tSQLExpr* pExpr, char** str) {
return tSQLExprLeafToString(pExpr, true, str);
}
-static int32_t getTablenameCond(SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) {
+static int32_t getTablenameCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SStringBuilder* sb) {
const char* msg0 = "invalid table name list";
if (pTableCond == NULL) {
@@ -2980,35 +2980,35 @@ static int32_t getTablenameCond(SQueryInfo* pQueryInfo, tSQLExpr* pTableCond, SS
}
if (ret != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
return ret;
}
-static int32_t getColumnQueryCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t relOptr) {
+static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, int32_t relOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
if (!isExprDirectParentOfLeaftNode(pExpr)) { // internal node
- int32_t ret = getColumnQueryCondInfo(pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr);
+ int32_t ret = getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pLeft, pExpr->nSQLOptr);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- return getColumnQueryCondInfo(pQueryInfo, pExpr->pRight, pExpr->nSQLOptr);
+ return getColumnQueryCondInfo(pCmd, pQueryInfo, pExpr->pRight, pExpr->nSQLOptr);
} else { // handle leaf node
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
- return extractColumnFilterInfo(pQueryInfo, &index, pExpr, relOptr);
+ return extractColumnFilterInfo(pCmd, pQueryInfo, &index, pExpr, relOptr);
}
}
-static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
+static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg1 = "invalid join query condition";
const char* msg2 = "join on binary/nchar not supported";
const char* msg3 = "type of join columns must be identical";
@@ -3019,7 +3019,7 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
}
if (!isExprDirectParentOfLeaftNode(pExpr)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
STagCond* pTagCond = &pQueryInfo->tagCond;
@@ -3027,8 +3027,8 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
SJoinNode* pRight = &pTagCond->joinInfo.right;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3039,8 +3039,8 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
strcpy(pLeft->tableId, pTableMetaInfo->name);
index = (SColumnIndex)COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3051,11 +3051,11 @@ static int32_t getJoinCondInfo(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
strcpy(pRight->tableId, pTableMetaInfo->name);
if (pTagSchema1->type != pTagSchema2->type) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (pTagSchema1->type == TSDB_DATA_TYPE_BINARY || pTagSchema1->type == TSDB_DATA_TYPE_NCHAR) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pTagCond->joinInfo.hasJoin = true;
@@ -3094,7 +3094,7 @@ int32_t buildArithmeticExprString(tSQLExpr* pExpr, char** exprString) {
return TSDB_CODE_SUCCESS;
}
-static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
+static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
if (pExpr->nSQLOptr == TK_ID) {
if (*type == NON_ARITHMEIC_EXPR) {
*type = NORMAL_ARITHMETIC;
@@ -3103,7 +3103,7 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnL
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pExpr->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3131,7 +3131,7 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnL
tSQLExprItem item = {.pNode = pExpr, .aliasName = NULL};
// sql function in selection clause, append sql function info in pSqlCmd structure sequentially
- if (addExprAndResultField(pQueryInfo, outputIndex, &item, false) != TSDB_CODE_SUCCESS) {
+ if (addExprAndResultField(pCmd, pQueryInfo, outputIndex, &item, false) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
@@ -3139,19 +3139,19 @@ static int32_t validateSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnL
return TSDB_CODE_SUCCESS;
}
-static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
+static int32_t validateArithmeticSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, int32_t* type) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
tSQLExpr* pLeft = pExpr->pLeft;
if (pLeft->nSQLOptr >= TK_PLUS && pLeft->nSQLOptr <= TK_REM) {
- int32_t ret = validateArithmeticSQLExpr(pLeft, pQueryInfo, pList, type);
+ int32_t ret = validateArithmeticSQLExpr(pCmd, pLeft, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
} else {
- int32_t ret = validateSQLExpr(pLeft, pQueryInfo, pList, type);
+ int32_t ret = validateSQLExpr(pCmd, pLeft, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3159,12 +3159,12 @@ static int32_t validateArithmeticSQLExpr(tSQLExpr* pExpr, SQueryInfo* pQueryInfo
tSQLExpr* pRight = pExpr->pRight;
if (pRight->nSQLOptr >= TK_PLUS && pRight->nSQLOptr <= TK_REM) {
- int32_t ret = validateArithmeticSQLExpr(pRight, pQueryInfo, pList, type);
+ int32_t ret = validateArithmeticSQLExpr(pCmd, pRight, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
} else {
- int32_t ret = validateSQLExpr(pRight, pQueryInfo, pList, type);
+ int32_t ret = validateSQLExpr(pCmd, pRight, pQueryInfo, pList, type);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3243,7 +3243,7 @@ static void exchangeExpr(tSQLExpr* pExpr) {
}
}
-static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) {
+static bool validateJoinExprNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColumnIndex* pLeftIndex) {
const char* msg1 = "illegal column name";
const char* msg2 = "= is expected in join expression";
const char* msg3 = "join column must have same type";
@@ -3257,14 +3257,14 @@ static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColum
}
if (pExpr->nSQLOptr != TK_EQ) {
- invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
return false;
}
SColumnIndex rightIndex = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &rightIndex) != TSDB_CODE_SUCCESS) {
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
return false;
}
@@ -3278,16 +3278,16 @@ static bool validateJoinExprNode(SQueryInfo* pQueryInfo, tSQLExpr* pExpr, SColum
int16_t rightType = pRightSchema[rightIndex.columnIndex].type;
if (leftType != rightType) {
- invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
return false;
} else if (pLeftIndex->tableIndex == rightIndex.tableIndex) {
- invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
return false;
}
// table to table/ super table to super table are allowed
if (UTIL_TABLE_IS_SUPER_TABLE(pLeftMeterMeta) != UTIL_TABLE_IS_SUPER_TABLE(pRightMeterMeta)) {
- invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
return false;
}
@@ -3320,8 +3320,8 @@ static int32_t setExprToCond(tSQLExpr** parent, tSQLExpr* pExpr, const char* msg
return TSDB_CODE_SUCCESS;
}
-static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
- int32_t parentOptr) {
+static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr,
+ int32_t* type, int32_t parentOptr) {
const char* msg1 = "table query cannot use tags filter";
const char* msg2 = "illegal column name";
const char* msg3 = "only one query time range allowed";
@@ -3337,8 +3337,8 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
int32_t ret = TSDB_CODE_SUCCESS;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
assert(isExprDirectParentOfLeaftNode(*pExpr));
@@ -3347,7 +3347,7 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
STableMeta* pTableMeta = pTableMetaInfo->pTableMeta;
if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range
- if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) {
+ if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3370,31 +3370,31 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
} else if (index.columnIndex >= tscGetNumOfColumns(pTableMeta) || index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
// query on tags, check for tag query condition
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
// check for like expression
if ((*pExpr)->nSQLOptr == TK_LIKE) {
if (pRight->val.nLen > TSDB_PATTERN_STRING_MAX_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
if ((!isTablenameToken(&pLeft->colInfo)) && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_BINARY &&
pSchema[index.columnIndex].type != TSDB_DATA_TYPE_NCHAR) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
// in case of in operator, keep it in a seperate attribute
if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) {
if (!validTableNameOptr(*pExpr)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pCondExpr->pTableCond == NULL) {
@@ -3402,19 +3402,19 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
pCondExpr->relType = parentOptr;
pCondExpr->tableCondIndex = index.tableIndex;
} else {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
*type = TSQL_EXPR_TBNAME;
*pExpr = NULL;
} else {
if (pRight->nSQLOptr == TK_ID) { // join on tag columns for stable query
- if (!validateJoinExprNode(pQueryInfo, *pExpr, &index)) {
+ if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pCondExpr->pJoinExpr != NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY;
@@ -3433,7 +3433,7 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
*type = TSQL_EXPR_COLUMN;
if (pRight->nSQLOptr == TK_ID) { // other column cannot be served as the join column
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
ret = setExprToCond(&pCondExpr->pColumnCond, *pExpr, NULL, parentOptr, pQueryInfo->msg);
@@ -3443,8 +3443,8 @@ static int32_t handleExprInQueryCond(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, S
return ret;
}
-int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr, int32_t* type,
- int32_t parentOptr) {
+int32_t getQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pCondExpr,
+ int32_t* type, int32_t parentOptr) {
if (pExpr == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -3462,12 +3462,12 @@ int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pC
int32_t rightType = -1;
if (!isExprDirectParentOfLeaftNode(*pExpr)) {
- int32_t ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
+ int32_t ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pLeft, pCondExpr, &leftType, (*pExpr)->nSQLOptr);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
- ret = getQueryCondExpr(pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr);
+ ret = getQueryCondExpr(pCmd, pQueryInfo, &(*pExpr)->pRight, pCondExpr, &rightType, (*pExpr)->nSQLOptr);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3478,7 +3478,7 @@ int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pC
*/
if (leftType != rightType) {
if ((*pExpr)->nSQLOptr == TK_OR && (leftType + rightType != TSQL_EXPR_TBNAME + TSQL_EXPR_TAG)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -3488,7 +3488,7 @@ int32_t getQueryCondExpr(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SCondExpr* pC
exchangeExpr(*pExpr);
- return handleExprInQueryCond(pQueryInfo, pExpr, pCondExpr, type, parentOptr);
+ return handleExprInQueryCond(pCmd, pQueryInfo, pExpr, pCondExpr, type, parentOptr);
}
static void doCompactQueryExpr(tSQLExpr** pExpr) {
@@ -3522,12 +3522,12 @@ static void doCompactQueryExpr(tSQLExpr** pExpr) {
}
}
-static void doExtractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) {
+static void doExtractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQLExpr** pOut, int32_t tableIndex) {
if (isExprDirectParentOfLeaftNode(*pExpr)) {
tSQLExpr* pLeft = (*pExpr)->pLeft;
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return;
}
@@ -3544,16 +3544,16 @@ static void doExtractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, tSQ
} else {
*pOut = tSQLExprCreate(NULL, NULL, (*pExpr)->nSQLOptr);
- doExtractExprForSTable(&(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex);
- doExtractExprForSTable(&(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex);
+ doExtractExprForSTable(pCmd, &(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex);
+ doExtractExprForSTable(pCmd, &(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex);
}
}
-static tSQLExpr* extractExprForSTable(tSQLExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex) {
+static tSQLExpr* extractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* pQueryInfo, int32_t tableIndex) {
tSQLExpr* pResExpr = NULL;
if (*pExpr != NULL) {
- doExtractExprForSTable(pExpr, pQueryInfo, &pResExpr, tableIndex);
+ doExtractExprForSTable(pCmd, pExpr, pQueryInfo, &pResExpr, tableIndex);
doCompactQueryExpr(&pResExpr);
}
@@ -3573,8 +3573,8 @@ int tableNameCompar(const void* lhs, const void* rhs) {
return ret > 0 ? 1 : -1;
}
-static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* account, tSQLExpr* pExpr,
- int16_t tableCondIndex, SStringBuilder* sb) {
+static int32_t setTableCondForSTableQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, const char* account,
+ tSQLExpr* pExpr, int16_t tableCondIndex, SStringBuilder* sb) {
const char* msg = "table name too long";
if (pExpr == NULL) {
@@ -3631,7 +3631,7 @@ static int32_t setTableCondForSTableQuery(SQueryInfo* pQueryInfo, const char* ac
taosStringBuilderDestroy(&sb1);
tfree(segments);
- invalidSqlErrMsg(pQueryInfo->msg, msg);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
return ret;
}
@@ -3674,7 +3674,7 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) {
return true;
}
-static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
+static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
const char* msg0 = "invalid timestamp";
const char* msg1 = "only one time stamp window allowed";
@@ -3684,15 +3684,15 @@ static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
if (!isExprDirectParentOfLeaftNode(pExpr)) {
if (pExpr->nSQLOptr == TK_OR) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
- getTimeRangeFromExpr(pQueryInfo, pExpr->pLeft);
+ getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft);
- return getTimeRangeFromExpr(pQueryInfo, pExpr->pRight);
+ return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight);
} else {
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
- if (getColumnIndexByName(&pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -3703,7 +3703,7 @@ static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
if (getTimeRange(&win, pRight, pExpr->nSQLOptr, tinfo.precision) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
// update the timestamp query range
@@ -3719,7 +3719,7 @@ static int32_t getTimeRangeFromExpr(SQueryInfo* pQueryInfo, tSQLExpr* pExpr) {
return TSDB_CODE_SUCCESS;
}
-static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
+static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
const char* msg1 = "super table join requires tags column";
const char* msg2 = "timestamp join condition missing";
const char* msg3 = "condition missing for join query";
@@ -3728,7 +3728,7 @@ static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
if (pQueryInfo->numOfTables == 1) {
return TSDB_CODE_SUCCESS;
} else {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -3736,12 +3736,12 @@ static int32_t validateJoinExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // for stable join, tag columns
// must be present for join
if (pCondExpr->pJoinExpr == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
if (!pCondExpr->tsJoin) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
@@ -3769,12 +3769,12 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) {
}
}
-static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
+static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
SColumnIndex index = {0};
- if (getColumnIndexByName(&pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (left)", pQueryInfo);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3782,7 +3782,7 @@ static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* p
index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
tscColumnListInsert(pTableMetaInfo->tagColList, &index);
- if (getColumnIndexByName(&pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &pCondExpr->pJoinExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
tscError("%p: invalid column name (right)", pQueryInfo);
}
pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex);
@@ -3792,7 +3792,7 @@ static void doAddJoinTagsColumnsIntoTagList(SQueryInfo* pQueryInfo, SCondExpr* p
}
}
-static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) {
+static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr, tSQLExpr** pExpr) {
int32_t ret = TSDB_CODE_SUCCESS;
if (pCondExpr->pTagCond == NULL) {
@@ -3800,7 +3800,7 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
}
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
- tSQLExpr* p1 = extractExprForSTable(pExpr, pQueryInfo, i);
+ tSQLExpr* p1 = extractExprForSTable(pCmd, pExpr, pQueryInfo, i);
if (p1 == NULL) { // no query condition on this table
continue;
}
@@ -3808,7 +3808,7 @@ static int32_t getTagQueryCondExpr(SQueryInfo* pQueryInfo, SCondExpr* pCondExpr,
tExprNode* p = NULL;
SArray* colList = taosArrayInit(10, sizeof(SColIndex));
- ret = exprTreeFromSqlExpr(&p, p1, NULL, pQueryInfo, colList);
+ ret = exprTreeFromSqlExpr(pCmd, &p, p1, NULL, pQueryInfo, colList);
SBufferWriter bw = tbufInitWriter(NULL, false);
TRY(0) {
@@ -3859,11 +3859,11 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
SCondExpr condExpr = {0};
if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1);
}
int32_t type = 0;
- if ((ret = getQueryCondExpr(pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getQueryCondExpr(&pSql->cmd, pQueryInfo, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -3873,46 +3873,46 @@ int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql
condExpr.pTagCond = (*pExpr);
// 1. check if it is a join query
- if ((ret = validateJoinExpr(pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = validateJoinExpr(&pSql->cmd, pQueryInfo, &condExpr)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 2. get the query time range
- if ((ret = getTimeRangeFromExpr(pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getTimeRangeFromExpr(&pSql->cmd, pQueryInfo, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 3. get the tag query condition
- if ((ret = getTagQueryCondExpr(pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getTagQueryCondExpr(&pSql->cmd, pQueryInfo, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 4. get the table name query condition
- if ((ret = getTablenameCond(pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 5. other column query condition
- if ((ret = getColumnQueryCondInfo(pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 6. join condition
- if ((ret = getJoinCondInfo(pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
+ if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) {
return ret;
}
// 7. query condition for table name
pQueryInfo->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR;
- ret = setTableCondForSTableQuery(pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
+ ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb);
taosStringBuilderDestroy(&sb);
if (!validateFilterExpr(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2);
}
- doAddJoinTagsColumnsIntoTagList(pQueryInfo, &condExpr);
+ doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr);
cleanQueryExpr(&condExpr);
return ret;
@@ -4007,7 +4007,7 @@ int32_t getTimeRange(STimeWindow* win, tSQLExpr* pRight, int32_t optr, int16_t t
}
// todo error !!!!
-int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
+int32_t tsRewriteFieldNameIfNecessary(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char rep[] = {'(', ')', '*', ',', '.', '/', '\\', '+', '-', '%', ' '};
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
@@ -4030,7 +4030,7 @@ int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
for (int32_t j = i + 1; j < pQueryInfo->fieldsInfo.numOfOutput; ++j) {
if (strncasecmp(fieldName, tscFieldInfoGetField(&pQueryInfo->fieldsInfo, j)->name, (TSDB_COL_NAME_LEN - 1)) == 0) {
const char* msg = "duplicated column name in new table";
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
}
@@ -4038,7 +4038,7 @@ int32_t tsRewriteFieldNameIfNecessary(SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
+int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
tVariantList* pFillToken = pQuerySQL->fillType;
tVariantListItem* pItem = &pFillToken->a[0];
@@ -4049,7 +4049,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
const char* msg2 = "invalid fill option";
if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
@@ -4081,7 +4081,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
pQueryInfo->fillType = TSDB_FILL_SET_VALUE;
if (pFillToken->nExpr == 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
int32_t startPos = 1;
@@ -4110,7 +4110,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
int32_t ret = tVariantDump(&pFillToken->a[j].pVar, (char*)&pQueryInfo->fillVal[i], pFields->type, true);
if (ret != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
}
@@ -4128,7 +4128,7 @@ int32_t parseFillClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL) {
}
}
} else {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
return TSDB_CODE_SUCCESS;
@@ -4152,7 +4152,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) {
}
}
-int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) {
+int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema* pSchema) {
const char* msg0 = "only support order by primary timestamp";
const char* msg1 = "invalid column name";
const char* msg2 = "only support order by primary timestamp and queried column";
@@ -4175,11 +4175,11 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
*/
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
if (pSortorder->nExpr > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
} else {
if (pSortorder->nExpr > 2) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
@@ -4195,8 +4195,8 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
SColumnIndex index = {0};
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query
- if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
bool orderByTags = false;
@@ -4207,7 +4207,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
// it is a tag column
if (pQueryInfo->groupbyExpr.columnInfo == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0);
if (relTagIndex == pColIndex->colIndex) {
@@ -4222,7 +4222,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
}
if (!(orderByTags || orderByTS) && !isTopBottomQuery(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
} else {
assert(!(orderByTags && orderByTS));
}
@@ -4238,7 +4238,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
pExpr = tscSqlExprGet(pQueryInfo, 1);
if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder;
@@ -4261,12 +4261,12 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
tVariant* pVar2 = &pSortorder->a[1].pVar;
SSQLToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz};
- if (getColumnIndexByName(&cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &cname, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
} else {
pQueryInfo->order.order = pSortorder->a[1].sortOrder;
pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX;
@@ -4274,12 +4274,12 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
}
} else { // meter query
- if (getColumnIndexByName(&columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
if (isTopBottomQuery(pQueryInfo)) {
@@ -4289,7 +4289,7 @@ int32_t parseOrderbyClause(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql, SSchema
pExpr = tscSqlExprGet(pQueryInfo, 1);
if (pExpr->colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
pQueryInfo->order.order = pQuerySql->pSortOrder->a[0].sortOrder;
@@ -4335,11 +4335,11 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
if (tscValidateName(&(pAlterSQL->name)) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (tscSetTableFullName(pTableMetaInfo, &(pAlterSQL->name), pSql) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
int32_t ret = tscGetTableMeta(pSql, pTableMetaInfo);
@@ -4352,19 +4352,19 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN ||
pAlterSQL->type == TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN) {
if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) && (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo))) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
} else if ((pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN || pAlterSQL->type == TSDB_ALTER_TABLE_DROP_COLUMN) &&
UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) {
tFieldList* pFieldList = pAlterSQL->pAddColumns;
if (pFieldList->nField > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (!validateOneTags(pCmd, &pFieldList->p[0])) {
@@ -4374,31 +4374,31 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &pFieldList->p[0]);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_DROP_TAG_COLUMN) {
if (tscGetNumOfTags(pTableMeta) == 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg7);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
// numOfTags == 1
if (pAlterSQL->varList->nExpr > 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg8);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8);
}
tVariantListItem* pItem = &pAlterSQL->varList->a[0];
if (pItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
SColumnIndex index = COLUMN_INDEX_INITIALIZER;
SSQLToken name = {.z = pItem->pVar.pz, .n = pItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(&name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &name, pQueryInfo, &index) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
int32_t numOfCols = tscGetNumOfColumns(pTableMeta);
if (index.columnIndex < numOfCols) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg10);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
} else if (index.columnIndex == numOfCols) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg11);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
char name1[128] = {0};
@@ -4416,23 +4416,23 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantListItem* pDstItem = &pAlterSQL->varList->a[1];
if (pSrcItem->pVar.nLen >= TSDB_COL_NAME_LEN || pDstItem->pVar.nLen >= TSDB_COL_NAME_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg9);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
if (pSrcItem->pVar.nType != TSDB_DATA_TYPE_BINARY || pDstItem->pVar.nType != TSDB_DATA_TYPE_BINARY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg10);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10);
}
SColumnIndex srcIndex = COLUMN_INDEX_INITIALIZER;
SColumnIndex destIndex = COLUMN_INDEX_INITIALIZER;
SSQLToken srcToken = {.z = pSrcItem->pVar.pz, .n = pSrcItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(&srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &srcToken, pQueryInfo, &srcIndex) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
SSQLToken destToken = {.z = pDstItem->pVar.pz, .n = pDstItem->pVar.nLen, .type = TK_STRING};
- if (getColumnIndexByName(&destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &destToken, pQueryInfo, &destIndex) == TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -4452,20 +4452,21 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
tVariantList* pVarList = pAlterSQL->varList;
tVariant* pTagName = &pVarList->a[0].pVar;
+ int16_t numOfTags = tscGetNumOfTags(pTableMeta);
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SSQLToken name = {.type = TK_STRING, .z = pTagName->pz, .n = pTagName->nLen};
- if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+ if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (columnIndex.columnIndex < tscGetNumOfColumns(pTableMeta)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg12);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg12);
}
SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex);
if (tVariantDump(&pVarList->a[1].pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg13);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg13);
}
pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
@@ -4473,10 +4474,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
(pVarList->a[1].pVar.nLen + VARSTR_HEADER_SIZE) > pTagsSchema->bytes) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg14);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg14);
}
-
- int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + TSDB_EXTRA_PAYLOAD_SIZE;
+
+ int32_t schemaLen = sizeof(STColumn) * numOfTags;
+ int32_t size = sizeof(SUpdateTableTagValMsg) + pTagsSchema->bytes + schemaLen + TSDB_EXTRA_PAYLOAD_SIZE;
+
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, size)) {
tscError("%p failed to malloc for alter table msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@@ -4487,29 +4490,43 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
- pUpdateMsg->type = htons(pTagsSchema->type);
- pUpdateMsg->bytes = htons(pTagsSchema->bytes);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
-
- tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data, pTagsSchema->type, true);
+ pUpdateMsg->numOfTags = htons(numOfTags);
+ pUpdateMsg->schemaLen = htonl(schemaLen);
+
+ // the schema is located after the msg body, then followed by true tag value
+ char* d = pUpdateMsg->data;
+ SSchema* pTagCols = tscGetTableTagSchema(pTableMeta);
+ for (int i = 0; i < numOfTags; ++i) {
+ STColumn* pCol = (STColumn*) d;
+ pCol->colId = htons(pTagCols[i].colId);
+ pCol->bytes = htons(pTagCols[i].bytes);
+ pCol->type = pTagCols[i].type;
+ pCol->offset = 0;
+
+ d += sizeof(STColumn);
+ }
+
+ // copy the tag value to msg body
+ tVariantDump(&pVarList->a[1].pVar, pUpdateMsg->data + schemaLen, pTagsSchema->type, true);
int32_t len = 0;
if (pTagsSchema->type != TSDB_DATA_TYPE_BINARY && pTagsSchema->type != TSDB_DATA_TYPE_NCHAR) {
len = tDataTypeDesc[pTagsSchema->type].nSize;
} else {
- len = varDataTLen(pUpdateMsg->data);
+ len = varDataTLen(pUpdateMsg->data + schemaLen);
}
pUpdateMsg->tagValLen = htonl(len); // length may be changed after dump data
- int32_t total = sizeof(SUpdateTableTagValMsg) + len;
+ int32_t total = sizeof(SUpdateTableTagValMsg) + len + schemaLen;
pUpdateMsg->head.contLen = htonl(total);
} else if (pAlterSQL->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
tFieldList* pFieldList = pAlterSQL->pAddColumns;
if (pFieldList->nField > 1) {
const char* msg = "only support add one column";
- return invalidSqlErrMsg(pQueryInfo->msg, msg);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg);
}
if (!validateOneColumn(pCmd, &pFieldList->p[0])) {
@@ -4530,12 +4547,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SColumnIndex columnIndex = COLUMN_INDEX_INITIALIZER;
SSQLToken name = {.type = TK_STRING, .z = pItem->pVar.pz, .n = pItem->pVar.nLen};
- if (getColumnIndexByName(&name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg17);
+ if (getColumnIndexByName(pCmd, &name, pQueryInfo, &columnIndex) != TSDB_CODE_SUCCESS) {
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg17);
}
if (columnIndex.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg18);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg18);
}
char name1[TSDB_COL_NAME_LEN] = {0};
@@ -4547,26 +4564,26 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
return TSDB_CODE_SUCCESS;
}
-int32_t validateSqlFunctionInStreamSql(SQueryInfo* pQueryInfo) {
+int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
size_t size = taosArrayGetSize(pQueryInfo->exprList);
for (int32_t i = 0; i < size; ++i) {
int32_t functId = tscSqlExprGet(pQueryInfo, i)->functionId;
if (!IS_STREAM_QUERY_VALID(aAggs[functId].nStatus)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
return TSDB_CODE_SUCCESS;
}
-int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo) {
+int32_t validateFunctionsInIntervalOrGroupbyQuery(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
bool isProjectionFunction = false;
const char* msg1 = "column projection is not compatible with interval";
@@ -4599,7 +4616,7 @@ int32_t validateFunctionsInIntervalOrGroupbyQuery(SQueryInfo* pQueryInfo) {
}
if (isProjectionFunction) {
- invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
return isProjectionFunction == true ? TSDB_CODE_TSC_INVALID_SQL : TSDB_CODE_SUCCESS;
@@ -4741,7 +4758,7 @@ bool hasTimestampForPointInterpQuery(SQueryInfo* pQueryInfo) {
return (pQueryInfo->window.skey == pQueryInfo->window.ekey) && (pQueryInfo->window.skey != 0);
}
-int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQuerySql, SSqlObj* pSql) {
+int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL* pQuerySql, SSqlObj* pSql) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
const char* msg0 = "soffset/offset can not be less than 0";
@@ -4758,7 +4775,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset);
if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg0);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->limit.limit == 0) {
@@ -4772,7 +4789,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query
if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
// for projection query on super table, all queries are subqueries
@@ -4825,7 +4842,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
}
} else {
if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
size_t size = taosArrayGetSize(pQueryInfo->exprList);
@@ -4843,7 +4860,7 @@ int32_t parseLimitClause(SQueryInfo* pQueryInfo, int32_t clauseIndex, SQuerySQL*
}
if (hasTags && hasOtherFunc) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
@@ -5138,7 +5155,7 @@ static void updateTagPrjFunction(SQueryInfo* pQueryInfo) {
* 2. if selectivity function and tagprj function both exist, there should be only
* one selectivity function exists.
*/
-static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
+static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo, SSqlCmd* pCmd) {
const char* msg1 = "only one selectivity function allowed in presence of tags function";
const char* msg3 = "aggregation function should not be mixed up with projection";
@@ -5176,7 +5193,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
// When the tag projection function on tag column that is not in the group by clause, aggregation function and
// selectivity function exist in select clause is not allowed.
if (numOfAggregation > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
/*
@@ -5198,7 +5215,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
}
if (((aAggs[functionId].nStatus & TSDB_FUNCSTATE_SELECTIVITY) != 0) && (functionId != TSDB_FUNC_LAST_ROW)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
@@ -5208,7 +5225,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
} else {
if ((pQueryInfo->type & TSDB_QUERY_TYPE_PROJECTION_QUERY) != 0) {
if (numOfAggregation > 0 && pQueryInfo->groupbyExpr.numOfGroupCols == 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
if (numOfAggregation > 0 || numOfSelectivity > 0) {
@@ -5222,7 +5239,7 @@ static int32_t checkUpdateTagPrjFunctions(SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
+static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg2 = "interval not allowed in group by normal column";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
@@ -5271,7 +5288,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SQueryInfo* pQueryInfo) {
} else {
// if this query is "group by" normal column, interval is not allowed
if (pQueryInfo->intervalTime > 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
bool hasGroupColumn = false;
@@ -5314,7 +5331,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
// check if all the tags prj columns belongs to the group by columns
if (onlyTagPrjFunction(pQueryInfo) && allTagPrjInGroupby(pQueryInfo)) {
updateTagPrjFunction(pQueryInfo);
- return doAddGroupbyColumnsOnDemand(pQueryInfo);
+ return doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo);
}
// check all query functions in selection clause, multi-output functions are not allowed
@@ -5338,21 +5355,21 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
}
if (!qualified) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
if (IS_MULTIOUTPUT(aAggs[functId].nStatus) && functId != TSDB_FUNC_TOP && functId != TSDB_FUNC_BOTTOM &&
functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_PRJ) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (functId == TSDB_FUNC_COUNT && pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
}
- if (checkUpdateTagPrjFunctions(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (checkUpdateTagPrjFunctions(pQueryInfo, pCmd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5360,7 +5377,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
* group by tag function must be not changed the function name, otherwise, the group operation may fail to
* divide the subset of final result.
*/
- if (doAddGroupbyColumnsOnDemand(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (doAddGroupbyColumnsOnDemand(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5371,23 +5388,23 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
} else {
- return checkUpdateTagPrjFunctions(pQueryInfo);
+ return checkUpdateTagPrjFunctions(pQueryInfo, pCmd);
}
}
-int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
+int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "only one expression allowed";
const char* msg2 = "invalid expression in select clause";
const char* msg3 = "invalid function";
tSQLExprList* pExprList = pQuerySql->pSelection;
if (pExprList->nExpr != 1) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
tSQLExpr* pExpr = pExprList->a[0].pNode;
if (pExpr->operand.z == NULL) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// TODO redefine the function
@@ -5417,7 +5434,7 @@ int32_t doLocalQueryProcess(SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
pQueryInfo->command = TSDB_SQL_CLI_VERSION;break;
case 4:
pQueryInfo->command = TSDB_SQL_CURRENT_USER;break;
- default: { return invalidSqlErrMsg(pQueryInfo->msg, msg3); }
+ default: { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); }
}
SColumnIndex ind = {0};
@@ -5704,11 +5721,11 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
tVariant* pVar = &pSrcMeterName->a[0].pVar;
SSQLToken srcToken = {.z = pVar->pz, .n = pVar->nLen, .type = TK_STRING};
if (tscValidateName(&srcToken) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (tscSetTableFullName(pTableMetaInfo, &srcToken, pSql) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
int32_t code = tscGetTableMeta(pSql, pTableMetaInfo);
@@ -5728,31 +5745,31 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
}
// set interval value
- if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
- (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) {
+ (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
// set the created table[stream] name
if (tscSetTableFullName(pTableMetaInfo, pzTableName, pSql) != TSDB_CODE_SUCCESS) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg1);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pQuerySql->selectToken.n > TSDB_MAX_SAVED_SQL_LEN) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg5);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
- if (tsRewriteFieldNameIfNecessary(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (tsRewriteFieldNameIfNecessary(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
pCmd->numOfCols = pQueryInfo->fieldsInfo.numOfOutput;
- if (validateSqlFunctionInStreamSql(pQueryInfo) != TSDB_CODE_SUCCESS) {
+ if (validateSqlFunctionInStreamSql(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5762,14 +5779,14 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->intervalTime == 0) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg3);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
tVariantListItem* pItem = &pQuerySql->fillType->a[0];
if (pItem->pVar.nType == TSDB_DATA_TYPE_BINARY) {
if (!((strncmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) ||
(strncmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4))) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg4);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
}
}
@@ -5817,7 +5834,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
if (pQuerySql->from == NULL) {
assert(pQuerySql->fillType == NULL && pQuerySql->pGroupby == NULL && pQuerySql->pWhere == NULL &&
pQuerySql->pSortOrder == NULL);
- return doLocalQueryProcess(pQueryInfo, pQuerySql);
+ return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySql);
}
if (pQuerySql->from->nExpr > TSDB_MAX_JOIN_TABLE_NUM) {
@@ -5887,17 +5904,17 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
// set interval value
- if (parseIntervalClause(pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
+ if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
- (validateFunctionsInIntervalOrGroupbyQuery(pQueryInfo) != TSDB_CODE_SUCCESS)) {
+ (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
}
// set order by info
- if (parseOrderbyClause(pQueryInfo, pQuerySql, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
+ if (parseOrderbyClause(pCmd, pQueryInfo, pQuerySql, tscGetTableSchema(pTableMetaInfo->pTableMeta)) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@@ -5932,7 +5949,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
if (!hasTimestampForPointInterpQuery(pQueryInfo)) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg2);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
// in case of join query, time range is required.
@@ -5944,7 +5961,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
}
}
- if ((code = parseLimitClause(pQueryInfo, index, pQuerySql, pSql)) != TSDB_CODE_SUCCESS) {
+ if ((code = parseLimitClause(pCmd, pQueryInfo, index, pQuerySql, pSql)) != TSDB_CODE_SUCCESS) {
return code;
}
@@ -5967,11 +5984,11 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
int64_t timeRange = labs(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_RETRIEVE_ROWS_IN_INTERVAL_QUERY) {
- return invalidSqlErrMsg(pQueryInfo->msg, msg6);
+ return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
}
- int32_t ret = parseFillClause(pQueryInfo, pQuerySql);
+ int32_t ret = parseFillClause(pCmd, pQueryInfo, pQuerySql);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -5980,19 +5997,19 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
return TSDB_CODE_SUCCESS; // Does not build query message here
}
-int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols) {
+int32_t exprTreeFromSqlExpr(SSqlCmd* pCmd, tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray* pExprInfo, SQueryInfo* pQueryInfo, SArray* pCols) {
tExprNode* pLeft = NULL;
tExprNode* pRight= NULL;
if (pSqlExpr->pLeft != NULL) {
- int32_t ret = exprTreeFromSqlExpr(&pLeft, pSqlExpr->pLeft, pExprInfo, pQueryInfo, pCols);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pLeft, pSqlExpr->pLeft, pExprInfo, pQueryInfo, pCols);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
}
if (pSqlExpr->pRight != NULL) {
- int32_t ret = exprTreeFromSqlExpr(&pRight, pSqlExpr->pRight, pExprInfo, pQueryInfo, pCols);
+ int32_t ret = exprTreeFromSqlExpr(pCmd, &pRight, pSqlExpr->pRight, pExprInfo, pQueryInfo, pCols);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
@@ -6027,7 +6044,7 @@ int32_t exprTreeFromSqlExpr(tExprNode **pExpr, const tSQLExpr* pSqlExpr, SArray*
}
} else if (pSqlExpr->nSQLOptr == TK_ID) { // column name, normal column arithmetic expression
SColumnIndex index = {0};
- int32_t ret = getColumnIndexByName(&pSqlExpr->colInfo, pQueryInfo, &index);
+ int32_t ret = getColumnIndexByName(pCmd, &pSqlExpr->colInfo, pQueryInfo, &index);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
}
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index a3d3b035e2c4564acd34a71fe1c1490ddc25ec75..d73983e77c704e66c1c0cf491c6b8205d749a570 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -14,8 +14,8 @@
*/
#include "os.h"
-#include "qsqltype.h"
#include "tcache.h"
+#include "tcmdtype.h"
#include "trpc.h"
#include "tscLocalMerge.h"
#include "tscLog.h"
@@ -46,10 +46,13 @@ static int32_t minMsgSize() { return tsRpcHeadSize + 100; }
static void tscSetDnodeIpList(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
SRpcIpSet* pIpList = &pSql->ipList;
-
- pIpList->numOfIps = pVgroupInfo->numOfIps;
pIpList->inUse = 0;
+ if (pVgroupInfo == NULL) {
+ pIpList->numOfIps = 0;
+ return;
+ }
+ pIpList->numOfIps = pVgroupInfo->numOfIps;
for(int32_t i = 0; i < pVgroupInfo->numOfIps; ++i) {
strcpy(pIpList->fqdn[i], pVgroupInfo->ipAddr[i].fqdn);
pIpList->port[i] = pVgroupInfo->ipAddr[i].port;
@@ -539,14 +542,18 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
int32_t index = pTableMetaInfo->vgroupIndex;
assert(index >= 0);
- pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
+ if (pTableMetaInfo->vgroupList->numOfVgroups > 0) {
+ pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index];
+ }
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, pTableMetaInfo->vgroupList->numOfVgroups);
} else {
pVgroupInfo = &pTableMeta->vgroupInfo;
}
tscSetDnodeIpList(pSql, pVgroupInfo);
- pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
+ if (pVgroupInfo != NULL) {
+ pQueryMsg->head.vgId = htonl(pVgroupInfo->vgId);
+ }
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
pTableIdInfo->tid = htonl(pTableMeta->sid);
@@ -1675,8 +1682,8 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
assert(pTableMetaInfo->pTableMeta == NULL);
- pTableMetaInfo->pTableMeta =
- (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name, pTableMeta, size, tsTableMetaKeepTimer);
+ pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name,
+ strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer);
// todo handle out of memory case
if (pTableMetaInfo->pTableMeta == NULL) {
@@ -1879,7 +1886,8 @@ int tscProcessShowRsp(SSqlObj *pSql) {
size_t size = 0;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
- pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, (char *)pTableMeta, size, tsTableMetaKeepTimer);
+ pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, strlen(key), (char *)pTableMeta, size,
+ tsTableMetaKeepTimer);
SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
if (pQueryInfo->colList == NULL) {
@@ -1942,16 +1950,15 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
}
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
return 0;
}
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
- STableMeta *pTableMeta = taosCacheAcquireByName(tscCacheHandle, pTableMetaInfo->name);
- if (pTableMeta == NULL) {
- /* not in cache, abort */
+ STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
+ if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
@@ -1975,7 +1982,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
- STableMeta *pTableMeta = taosCacheAcquireByName(tscCacheHandle, pTableMetaInfo->name);
+ STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
@@ -1989,7 +1996,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscCacheHandle, false);
}
}
@@ -2125,7 +2132,7 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), false);
}
- pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByName(tscCacheHandle, pTableMetaInfo->name);
+ pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMetaInfo->pTableMeta != NULL) {
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
tscDebug("%p retrieve table Meta from cache, the number of columns:%d, numOfTags:%d, %p", pSql, tinfo.numOfColumns,
diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c
index 201ace43de171025b902d30d043cea04b755c02a..82cc8cc225399f2aa78da0989192b82acb3bf8eb 100644
--- a/src/client/src/tscSystem.c
+++ b/src/client/src/tscSystem.c
@@ -148,7 +148,7 @@ void taos_init_imp() {
refreshTime = refreshTime < 10 ? 10 : refreshTime;
if (tscCacheHandle == NULL) {
- tscCacheHandle = taosCacheInit(refreshTime);
+ tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "client");
}
tscDebug("client is initialized successfully");
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 26a81c597f077e5101b3920e792c650d00ca50f3..9b6eff71232f609c18d02ab34c0c3d2ccf76312d 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -1115,31 +1115,6 @@ SColumn* tscColumnListInsert(SArray* pColumnList, SColumnIndex* pColIndex) {
return taosArrayGetP(pColumnList, i);
}
-SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
- if (numOfFilters == 0) {
- assert(src == NULL);
- return NULL;
- }
-
- SColumnFilterInfo* pFilter = calloc(1, numOfFilters * sizeof(SColumnFilterInfo));
-
- memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
- for (int32_t j = 0; j < numOfFilters; ++j) {
-
- if (pFilter[j].filterstr) {
- size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
- pFilter[j].pz = (int64_t) calloc(1, len);
-
- memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
- }
- }
-
- assert(src->filterstr == 0 || src->filterstr == 1);
- assert(!(src->lowerRelOptr == TSDB_RELATION_INVALID && src->upperRelOptr == TSDB_RELATION_INVALID));
-
- return pFilter;
-}
-
static void destroyFilterInfo(SColumnFilterInfo* pFilterInfo, int32_t numOfFilters) {
for(int32_t i = 0; i < numOfFilters; ++i) {
if (pFilterInfo[i].filterstr) {
diff --git a/src/common/inc/qsqltype.h b/src/common/inc/tcmdtype.h
similarity index 97%
rename from src/common/inc/qsqltype.h
rename to src/common/inc/tcmdtype.h
index 6f6493d17ca8b3a3c180332a728d0529dc6d474a..90fb5bf47854313a67e395eea7b99a992a579889 100644
--- a/src/common/inc/qsqltype.h
+++ b/src/common/inc/tcmdtype.h
@@ -13,8 +13,8 @@
* along with this program. If not, see .
*/
-#ifndef TDENGINE_QSQLCMD_H
-#define TDENGINE_QSQLCMD_H
+#ifndef TDENGINE_TSQLMSGTYPE_H
+#define TDENGINE_TSQLMSGTYPE_H
#ifdef __cplusplus
extern "C" {
@@ -109,4 +109,4 @@ extern char *sqlCmd[];
}
#endif
-#endif // TDENGINE_QSQLCMD_H
+#endif // TDENGINE_TSQLMSGTYPE_H
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index baa212d8b76fd07625c19c49299efa77fb58768c..2ed4b81204050ffe2c67b6f51929829d7403b557 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -50,8 +50,8 @@ extern "C" {
typedef struct {
int8_t type; // Column type
int16_t colId; // column ID
- int32_t bytes; // column bytes
- int32_t offset; // point offset in SDataRow after the header part
+ int16_t bytes; // column bytes
+ int16_t offset; // point offset in SDataRow after the header part
} STColumn;
#define colType(col) ((col)->type)
@@ -116,7 +116,7 @@ typedef struct {
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version);
void tdDestroyTSchemaBuilder(STSchemaBuilder *pBuilder);
void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version);
-int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int32_t bytes);
+int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes);
STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder);
// ----------------- Data row structure
diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h
index e7927605cbfe9b0d07efe351dd67e308d5b74173..da42c064ec9a34c7ffb30299e879a55024bcec39 100644
--- a/src/common/inc/tglobal.h
+++ b/src/common/inc/tglobal.h
@@ -33,6 +33,7 @@ extern int32_t tsStatusInterval;
extern int16_t tsNumOfVnodesPerCore;
extern int16_t tsNumOfTotalVnodes;
extern int32_t tsNumOfMnodes;
+extern int32_t tsEnableVnodeBak;
// common
extern int tsRpcTimer;
diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h
index d2008c9ff8181b54db83fdd92f777ba74489ce73..10d725db32102576837621c0dc50a822ba9a0104 100644
--- a/src/common/inc/tname.h
+++ b/src/common/inc/tname.h
@@ -27,4 +27,6 @@ SSchema tGetTableNameColumnSchema();
bool tscValidateTableNameLength(size_t len);
+SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
+
#endif // TDENGINE_NAME_H
diff --git a/src/common/src/sqlcmdstr.c b/src/common/src/sqlcmdstr.c
index 8584ba79761835989ab7a3e24d88824c14d107c5..672106523e7c7eab8c606db1940dd9485e7f4c8f 100644
--- a/src/common/src/sqlcmdstr.c
+++ b/src/common/src/sqlcmdstr.c
@@ -15,4 +15,4 @@
#define TSDB_SQL_C
-#include "qsqltype.h"
+#include "tcmdtype.h"
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index e5cbcfd143c642f7bc871e12d833cdcf544536be..7e551759f92faa3574c6ef842ef86154bca699d9 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -43,7 +43,7 @@ int tdEncodeSchema(void **buf, STSchema *pSchema) {
STColumn *pCol = schemaColAt(pSchema, i);
tlen += taosEncodeFixedI8(buf, colType(pCol));
tlen += taosEncodeFixedI16(buf, colColId(pCol));
- tlen += taosEncodeFixedI32(buf, colBytes(pCol));
+ tlen += taosEncodeFixedI16(buf, colBytes(pCol));
}
return tlen;
@@ -65,10 +65,10 @@ void *tdDecodeSchema(void *buf, STSchema **pRSchema) {
for (int i = 0; i < numOfCols; i++) {
int8_t type = 0;
int16_t colId = 0;
- int32_t bytes = 0;
+ int16_t bytes = 0;
buf = taosDecodeFixedI8(buf, &type);
buf = taosDecodeFixedI16(buf, &colId);
- buf = taosDecodeFixedI32(buf, &bytes);
+ buf = taosDecodeFixedI16(buf, &bytes);
if (tdAddColToSchema(&schemaBuilder, type, colId, bytes) < 0) {
tdDestroyTSchemaBuilder(&schemaBuilder);
return NULL;
@@ -105,7 +105,7 @@ void tdResetTSchemaBuilder(STSchemaBuilder *pBuilder, int32_t version) {
pBuilder->version = version;
}
-int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int32_t bytes) {
+int tdAddColToSchema(STSchemaBuilder *pBuilder, int8_t type, int16_t colId, int16_t bytes) {
if (!isValidDataType(type)) return -1;
if (pBuilder->nCols >= pBuilder->tCols) {
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index 225f12a2100f0e3204ff6525ecfc2c6a62ab10f1..67c104878a9c924181e1d2cbb0883bf7dd366ebe 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -41,6 +41,8 @@ int32_t tsStatusInterval = 1; // second
int16_t tsNumOfVnodesPerCore = 8;
int16_t tsNumOfTotalVnodes = TSDB_INVALID_VNODE_NUM;
int32_t tsNumOfMnodes = 3;
+int32_t tsEnableVnodeBak = 1;
+
// common
int32_t tsRpcTimer = 1000;
@@ -422,6 +424,16 @@ static void doInitGlobalConfig() {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
+ cfg.option = "vnodeBak";
+ cfg.ptr = &tsEnableVnodeBak;
+ cfg.valType = TAOS_CFG_VTYPE_INT32;
+ cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
+ cfg.minValue = 0;
+ cfg.maxValue = 1;
+ cfg.ptrLength = 1;
+ cfg.unitType = TAOS_CFG_UTYPE_NONE;
+ taosInitConfigOption(cfg);
+
cfg.option = "balance";
cfg.ptr = &tsEnableBalance;
cfg.valType = TAOS_CFG_VTYPE_INT32;
diff --git a/src/common/src/tname.c b/src/common/src/tname.c
index 2514ed26e55e54eddf54d83933beecdfbf4e06fa..295015d466456843c5ec149763d96edc2029f864 100644
--- a/src/common/src/tname.c
+++ b/src/common/src/tname.c
@@ -49,4 +49,29 @@ SSchema tGetTableNameColumnSchema() {
bool tscValidateTableNameLength(size_t len) {
return len < TSDB_TABLE_NAME_LEN;
-}
\ No newline at end of file
+}
+
+SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters) {
+ if (numOfFilters == 0) {
+ assert(src == NULL);
+ return NULL;
+ }
+
+ SColumnFilterInfo* pFilter = calloc(1, numOfFilters * sizeof(SColumnFilterInfo));
+
+ memcpy(pFilter, src, sizeof(SColumnFilterInfo) * numOfFilters);
+ for (int32_t j = 0; j < numOfFilters; ++j) {
+
+ if (pFilter[j].filterstr) {
+ size_t len = (size_t) pFilter[j].len + 1 * TSDB_NCHAR_SIZE;
+ pFilter[j].pz = (int64_t) calloc(1, len);
+
+ memcpy((char*)pFilter[j].pz, (char*)src[j].pz, (size_t)len);
+ }
+ }
+
+ assert(src->filterstr == 0 || src->filterstr == 1);
+ assert(!(src->lowerRelOptr == TSDB_RELATION_INVALID && src->upperRelOptr == TSDB_RELATION_INVALID));
+
+ return pFilter;
+}
diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c
index 3e7e8525efc90bbe877e8636a34ded4851805ace..a63396861644659f95ecbed98fd845a9a4beac21 100644
--- a/src/cq/src/cqMain.c
+++ b/src/cq/src/cqMain.c
@@ -213,6 +213,8 @@ void cqDrop(void *handle) {
pObj->pStream = NULL;
cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
+ tdFreeSchema(pObj->pSchema);
+ free(pObj->sqlStr);
free(pObj);
pthread_mutex_unlock(&pContext->mutex);
diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c
index 9cf024ba83cccb97fc196ae38eaace9f3e03917e..1ae128788880feb5735fd35afb0ff1a6f4f2ee1d 100644
--- a/src/dnode/src/dnodeMgmt.c
+++ b/src/dnode/src/dnodeMgmt.c
@@ -176,6 +176,7 @@ void dnodeCleanupMgmt() {
tsMgmtQset = NULL;
tsMgmtQueue = NULL;
+ vnodeCleanupResources();
}
void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
@@ -242,8 +243,14 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
int32_t vnode = atoi(de->d_name + 5);
if (vnode == 0) continue;
- vnodeList[*numOfVnodes] = vnode;
(*numOfVnodes)++;
+
+ if (*numOfVnodes >= TSDB_MAX_VNODES) {
+ dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES);
+ continue;
+ } else {
+ vnodeList[*numOfVnodes - 1] = vnode;
+ }
}
}
closedir(dir);
@@ -276,7 +283,7 @@ static void *dnodeOpenVnode(void *param) {
static int32_t dnodeOpenVnodes() {
int32_t *vnodeList = calloc(TSDB_MAX_VNODES, sizeof(int32_t));
- int32_t numOfVnodes;
+ int32_t numOfVnodes = 0;
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
@@ -337,7 +344,7 @@ static int32_t dnodeOpenVnodes() {
void dnodeStartStream() {
int32_t vnodeList[TSDB_MAX_VNODES];
int32_t numOfVnodes = 0;
- int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
+ int32_t status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
@@ -352,15 +359,14 @@ void dnodeStartStream() {
}
static void dnodeCloseVnodes() {
- int32_t *vnodeList = (int32_t *)malloc(sizeof(int32_t) * TSDB_MAX_VNODES);
- int32_t numOfVnodes;
+ int32_t vnodeList[TSDB_MAX_VNODES];
+ int32_t numOfVnodes = 0;
int32_t status;
- status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
+ status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
- free(vnodeList);
return;
}
@@ -368,7 +374,6 @@ static void dnodeCloseVnodes() {
vnodeClose(vnodeList[i]);
}
- free(vnodeList);
dInfo("total vnodes:%d are all closed", numOfVnodes);
}
@@ -391,7 +396,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
}
- void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
+ void *pVnode = vnodeAcquireVnode(pCreate->cfg.vgId);
if (pVnode != NULL) {
int32_t code = vnodeAlter(pVnode, pCreate);
vnodeRelease(pVnode);
diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c
index 6bbb291b6aba169710d2e564489d5bda220d6e02..66135a93e9e6c824ce5219a4f5ef65b230bbce95 100644
--- a/src/dnode/src/dnodeVRead.c
+++ b/src/dnode/src/dnodeVRead.c
@@ -98,11 +98,7 @@ void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) {
pHead->vgId = htonl(pHead->vgId);
pHead->contLen = htonl(pHead->contLen);
- if (pMsg->msgType == TSDB_MSG_TYPE_FETCH) {
- pVnode = vnodeGetVnode(pHead->vgId);
- } else {
- pVnode = vnodeAccquireVnode(pHead->vgId);
- }
+ pVnode = vnodeAcquireVnode(pHead->vgId);
if (pVnode == NULL) {
leftLen -= pHead->contLen;
@@ -179,24 +175,19 @@ void dnodeFreeVnodeRqueue(void *rqueue) {
// dynamically adjust the number of threads
}
-static void dnodeContinueExecuteQuery(void* pVnode, void* qhandle, SReadMsg *pMsg) {
+void dnodePutItemIntoReadQueue(void *pVnode, void *qhandle) {
SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg));
- pRead->rpcMsg = pMsg->rpcMsg;
- pRead->pCont = qhandle;
- pRead->contLen = 0;
pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY;
+ pRead->pCont = qhandle;
+ pRead->contLen = 0;
+
+ assert(pVnode != NULL);
+ taos_queue queue = vnodeAcquireRqueue(pVnode);
- taos_queue queue = vnodeGetRqueue(pVnode);
- taosWriteQitem(queue, TAOS_QTYPE_RPC, pRead);
+ taosWriteQitem(queue, TAOS_QTYPE_QUERY, pRead);
}
void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
- if (code == TSDB_CODE_VND_ACTION_IN_PROGRESS) return;
- if (code == TSDB_CODE_VND_ACTION_NEED_REPROCESSED) {
- dnodeContinueExecuteQuery(pVnode, pRead->rspRet.qhandle, pRead);
- code = TSDB_CODE_SUCCESS;
- }
-
SRpcMsg rpcRsp = {
.handle = pRead->rpcMsg.handle,
.pCont = pRead->rspRet.rsp,
@@ -206,6 +197,12 @@ void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
rpcSendResponse(&rpcRsp);
rpcFreeCont(pRead->rpcMsg.pCont);
+ vnodeRelease(pVnode);
+}
+
+void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) {
+ vnodeRelease(pVnode);
+ return;
}
static void *dnodeProcessReadQueue(void *param) {
@@ -219,9 +216,16 @@ static void *dnodeProcessReadQueue(void *param) {
break;
}
- dDebug("%p, msg:%s will be processed in vread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]);
+ dDebug("%p, msg:%s will be processed in vread queue, qtype:%d", pReadMsg->rpcMsg.ahandle,
+ taosMsg[pReadMsg->rpcMsg.msgType], type);
int32_t code = vnodeProcessRead(pVnode, pReadMsg);
- dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
+
+ if (type == TAOS_QTYPE_RPC) {
+ dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
+ } else {
+ dnodeDispatchNonRspMsg(pVnode, pReadMsg, code);
+ }
+
taosFreeQitem(pReadMsg);
}
diff --git a/src/inc/dnode.h b/src/inc/dnode.h
index b561c407a3415d7db27333d96e21a72d4f159d8b..096aae58f2ad9cb157ba5b700581bdf52a23f6eb 100644
--- a/src/inc/dnode.h
+++ b/src/inc/dnode.h
@@ -53,6 +53,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode);
void dnodeFreeVnodeWqueue(void *queue);
void *dnodeAllocateVnodeRqueue(void *pVnode);
void dnodeFreeVnodeRqueue(void *rqueue);
+void dnodePutItemIntoReadQueue(void *pVnode, void *qhandle);
void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code);
int32_t dnodeAllocateMnodePqueue();
diff --git a/src/inc/query.h b/src/inc/query.h
index 5fd2ede034ebfaaf86eecce7a429c33996606027..88badc2d7b5a849114ee4437855d7cfe1c51c1d8 100644
--- a/src/inc/query.h
+++ b/src/inc/query.h
@@ -20,6 +20,7 @@ extern "C" {
#endif
typedef void* qinfo_t;
+typedef void (*_qinfo_free_fn_t)(void*);
/**
* create the qinfo object according to QueryTableMsg
@@ -28,15 +29,13 @@ typedef void* qinfo_t;
* @param qinfo
* @return
*/
-int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, qinfo_t* qinfo);
+int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMsg, void* param, _qinfo_free_fn_t fn, qinfo_t* qinfo);
/**
* Destroy QInfo object
* @param qinfo qhandle
- * @param fp destroy callback function, while the qhandle is destoried, invoke the fp
- * @param param free callback params
*/
-void qDestroyQueryInfo(qinfo_t qinfo, void (*fp)(void*), void* param);
+void qDestroyQueryInfo(qinfo_t qinfo);
/**
* the main query execution function, including query on both table and multitables,
@@ -45,7 +44,7 @@ void qDestroyQueryInfo(qinfo_t qinfo, void (*fp)(void*), void* param);
* @param qinfo
* @return
*/
-void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param);
+void qTableQuery(qinfo_t qinfo);
/**
* Retrieve the produced results information, if current query is not paused or completed,
@@ -81,11 +80,16 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
/**
* kill current ongoing query and free query handle automatically
* @param qinfo qhandle
- * @param fp destroy callback function, while the qhandle is destoried, invoke the fp
- * @param param free callback params
* @return
*/
-int32_t qKillQuery(qinfo_t qinfo, void (*fp)(void*), void* param);
+int32_t qKillQuery(qinfo_t qinfo);
+
+void* qOpenQueryMgmt(int32_t vgId);
+void qSetQueryMgmtClosed(void* pExecutor);
+void qCleanupQueryMgmt(void* pExecutor);
+void** qRegisterQInfo(void* pMgmt, void* qInfo);
+void** qAcquireQInfo(void* pMgmt, void** key);
+void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree);
#ifdef __cplusplus
}
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index 76ca99c9ad26bb9228cd1d94cf463657bf1a5f8f..e4ee058cef2987819640a43768edf7f483b8c1bc 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -365,6 +365,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TAOS_QTYPE_FWD 1
#define TAOS_QTYPE_WAL 2
#define TAOS_QTYPE_CQ 3
+#define TAOS_QTYPE_QUERY 4
typedef enum {
TSDB_SUPER_TABLE = 0, // super table
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index cb25242d27d3f8e50af643df3c447c7aaae76484..6155f08e76646ff887a51be60a0bfa500a6f873c 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -285,9 +285,9 @@ typedef struct {
int32_t tid;
int16_t tversion;
int16_t colId;
- int16_t type;
- int16_t bytes;
int32_t tagValLen;
+ int16_t numOfTags;
+ int32_t schemaLen;
char data[];
} SUpdateTableTagValMsg;
diff --git a/src/inc/vnode.h b/src/inc/vnode.h
index 9f0c8cc24171184607b93ddaab30142ff29c4e7d..a034bc5706b3c7b3f4826ad0a35382503386669c 100644
--- a/src/inc/vnode.h
+++ b/src/inc/vnode.h
@@ -49,16 +49,19 @@ int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeClose(int32_t vgId);
void vnodeRelease(void *pVnode);
-void* vnodeAccquireVnode(int32_t vgId); // add refcount
+void* vnodeAcquireVnode(int32_t vgId); // add refcount
void* vnodeGetVnode(int32_t vgId); // keep refcount unchanged
+void* vnodeAcquireRqueue(void *);
void* vnodeGetRqueue(void *);
void* vnodeGetWqueue(int32_t vgId);
void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
-void vnodeBuildStatusMsg(void * param);
+int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
+void vnodeBuildStatusMsg(void *param);
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes);
+void vnodeCleanupResources();
int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);
diff --git a/src/kit/CMakeLists.txt b/src/kit/CMakeLists.txt
index 66e8cf73988ab25db7544b9a52215d2279630c63..df3ce1000197b1add7f6454de7ed13c652a4d50d 100644
--- a/src/kit/CMakeLists.txt
+++ b/src/kit/CMakeLists.txt
@@ -4,3 +4,4 @@ PROJECT(TDengine)
ADD_SUBDIRECTORY(shell)
ADD_SUBDIRECTORY(taosdemo)
ADD_SUBDIRECTORY(taosdump)
+ADD_SUBDIRECTORY(taosmigrate)
\ No newline at end of file
diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c
index 3265285ccadcdd89e74a2e77eb6b1f1e3dd78ce6..9a5aedcdb79b705a9be72c13afa860fb4f5a8506 100644
--- a/src/kit/taosdemo/taosdemo.c
+++ b/src/kit/taosdemo/taosdemo.c
@@ -32,6 +32,7 @@
#include
#include
#include
+#include
#include "taos.h"
#include "tutil.h"
@@ -54,6 +55,7 @@ static struct argp_option options[] = {
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
+ {0, 's', "sql file", 0, "The select sql file.", 3},
{0, 'M', 0, 0, "Use metric flag.", 13},
{0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14},
{0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6},
@@ -79,6 +81,7 @@ typedef struct DemoArguments {
char *password;
char *database;
char *tb_prefix;
+ char *sqlFile;
bool use_metric;
bool insert_only;
char *output_file;
@@ -120,6 +123,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'o':
arguments->output_file = arg;
break;
+ case 's':
+ arguments->sqlFile = arg;
+ break;
case 'q':
arguments->mode = atoi(arg);
break;
@@ -179,10 +185,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->tb_prefix = arg;
break;
case 'M':
- arguments->use_metric = true;
+ arguments->use_metric = false;
break;
case 'x':
- arguments->insert_only = true;
+ arguments->insert_only = false;
break;
case 'c':
if (wordexp(arg, &full_path, 0) != 0) {
@@ -253,6 +259,9 @@ typedef struct {
int data_of_rate;
int64_t start_time;
bool do_aggreFunc;
+
+ char* cols;
+ bool use_metric;
sem_t mutex_sem;
int notFinished;
@@ -305,6 +314,8 @@ void rand_string(char *str, int size);
double getCurrentTime();
void callBack(void *param, TAOS_RES *res, int code);
+void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass);
+void querySqlFile(TAOS* taos, char* sqlFile);
int main(int argc, char *argv[]) {
SDemoArguments arguments = { NULL, // host
@@ -313,6 +324,7 @@ int main(int argc, char *argv[]) {
"taosdata", // password
"test", // database
"t", // tb_prefix
+ NULL,
false, // use_metric
false, // insert_only
"./output.txt", // output_file
@@ -361,7 +373,7 @@ int main(int argc, char *argv[]) {
abort();
#endif
}
-
+
enum MODE query_mode = arguments.mode;
char *ip_addr = arguments.host;
uint16_t port = arguments.port;
@@ -385,6 +397,13 @@ int main(int argc, char *argv[]) {
char dataString[STRING_LEN];
bool do_aggreFunc = true;
+ if (NULL != arguments.sqlFile) {
+ TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port);
+ querySqlFile(qtaos, arguments.sqlFile);
+ taos_close(qtaos);
+ return 0;
+ }
+
memset(dataString, 0, STRING_LEN);
int len = 0;
@@ -495,47 +514,19 @@ int main(int argc, char *argv[]) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
}
- if (!use_metric) {
- /* Create all the tables; */
- printf("Creating %d table(s)......\n", ntables);
- for (int i = 0; i < ntables; i++) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols);
- queryDB(taos, command);
- }
-
- printf("Table(s) created!\n");
- taos_close(taos);
-
- } else {
+ if (use_metric) {
/* Create metric table */
printf("Creating meters super table...\n");
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command);
printf("meters created!\n");
- /* Create all the tables; */
- printf("Creating %d table(s)......\n", ntables);
- for (int i = 0; i < ntables; i++) {
- int j;
- if (i % 10 == 0) {
- j = 10;
- } else {
- j = i % 10;
- }
- if (j % 2 == 0) {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "shanghai");
- } else {
- snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "beijing");
- }
- queryDB(taos, command);
- }
-
- printf("Table(s) created!\n");
taos_close(taos);
}
+
/* Wait for table to create */
+ multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);
-
/* Insert data */
double ts = getCurrentTime();
printf("Inserting data......\n");
@@ -685,6 +676,198 @@ int main(int argc, char *argv[]) {
return 0;
}
+#define MAX_SQL_SIZE 65536
+void selectSql(TAOS* taos, char* sqlcmd)
+{
+ TAOS_RES *pSql = taos_query(taos, sqlcmd);
+ int32_t code = taos_errno(pSql);
+
+ if (code != 0) {
+ printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql));
+ taos_free_result(pSql);
+ exit(1);
+ }
+
+ int count = 0;
+ while (taos_fetch_row(pSql) != NULL) {
+ count++;
+ }
+
+ taos_free_result(pSql);
+ return;
+}
+
+
+/* Function to do regular expression check */
+static int regexMatch(const char *s, const char *reg, int cflags) {
+ regex_t regex;
+ char msgbuf[100] = {0};
+
+ /* Compile regular expression */
+ if (regcomp(&regex, reg, cflags) != 0) {
+ printf("Fail to compile regex\n");
+ exit(-1);
+ }
+
+ /* Execute regular expression */
+ int reti = regexec(&regex, s, 0, NULL, 0);
+ if (!reti) {
+ regfree(&regex);
+ return 1;
+ } else if (reti == REG_NOMATCH) {
+ regfree(&regex);
+ return 0;
+ } else {
+ regerror(reti, &regex, msgbuf, sizeof(msgbuf));
+ printf("Regex match failed: %s\n", msgbuf);
+ regfree(&regex);
+ exit(-1);
+ }
+
+ return 0;
+}
+
+static int isCommentLine(char *line) {
+ if (line == NULL) return 1;
+
+ return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
+}
+
+void querySqlFile(TAOS* taos, char* sqlFile)
+{
+ FILE *fp = fopen(sqlFile, "r");
+ if (fp == NULL) {
+ printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
+ exit(-1);
+ }
+
+ int read_len = 0;
+ char * cmd = calloc(1, MAX_SQL_SIZE);
+ size_t cmd_len = 0;
+ char * line = NULL;
+ size_t line_len = 0;
+
+ double t = getCurrentTime();
+
+ while ((read_len = getline(&line, &line_len, fp)) != -1) {
+ if (read_len >= MAX_SQL_SIZE) continue;
+ line[--read_len] = '\0';
+
+ if (read_len == 0 || isCommentLine(line)) { // line starts with #
+ continue;
+ }
+
+ if (line[read_len - 1] == '\\') {
+ line[read_len - 1] = ' ';
+ memcpy(cmd + cmd_len, line, read_len);
+ cmd_len += read_len;
+ continue;
+ }
+
+ memcpy(cmd + cmd_len, line, read_len);
+ selectSql(taos, cmd);
+ memset(cmd, 0, MAX_SQL_SIZE);
+ cmd_len = 0;
+ }
+
+ t = getCurrentTime() - t;
+ printf("run %s took %.6f second(s)\n\n", sqlFile, t);
+
+ free(cmd);
+ if (line) free(line);
+ fclose(fp);
+ return;
+}
+
+void * createTable(void *sarg)
+{
+ char command[BUFFER_SIZE] = "\0";
+
+ info *winfo = (info *)sarg;
+
+ if (!winfo->use_metric) {
+ /* Create all the tables; */
+ printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
+ for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
+ snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
+ queryDB(winfo->taos, command);
+ }
+
+ taos_close(winfo->taos);
+
+ } else {
+ /* Create all the tables; */
+ printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
+ for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
+ int j;
+ if (i % 10 == 0) {
+ j = 10;
+ } else {
+ j = i % 10;
+ }
+ if (j % 2 == 0) {
+ snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai");
+ } else {
+ snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing");
+ }
+ queryDB(winfo->taos, command);
+ }
+ taos_close(winfo->taos);
+ }
+
+ return NULL;
+}
+
+void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) {
+ double ts = getCurrentTime();
+ printf("create table......\n");
+ pthread_t *pids = malloc(threads * sizeof(pthread_t));
+ info *infos = malloc(threads * sizeof(info));
+
+ int a = ntables / threads;
+ if (a < 1) {
+ threads = ntables;
+ a = 1;
+ }
+
+ int b = 0;
+ if (threads != 0)
+ b = ntables % threads;
+ int last = 0;
+ for (int i = 0; i < threads; i++) {
+ info *t_info = infos + i;
+ t_info->threadID = i;
+ tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
+ tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
+ t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
+ t_info->start_table_id = last;
+ t_info->end_table_id = i < b ? last + a : last + a - 1;
+ last = t_info->end_table_id + 1;
+ t_info->use_metric = use_metric;
+ t_info->cols = cols;
+ pthread_create(pids + i, NULL, createTable, t_info);
+ }
+
+ for (int i = 0; i < threads; i++) {
+ pthread_join(pids[i], NULL);
+ }
+
+ double t = getCurrentTime() - ts;
+ printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads);
+
+ for (int i = 0; i < threads; i++) {
+ info *t_info = infos + i;
+ taos_close(t_info->taos);
+ sem_destroy(&(t_info->mutex_sem));
+ sem_destroy(&(t_info->lock_sem));
+ }
+
+ free(pids);
+ free(infos);
+
+ return ;
+}
+
void *readTable(void *sarg) {
info *rinfo = (info *)sarg;
TAOS *taos = rinfo->taos;
diff --git a/src/kit/taosmigrate/CMakeLists.txt b/src/kit/taosmigrate/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..85b2f33f01f65cd6d49d47440455ed8ef53d0bdf
--- /dev/null
+++ b/src/kit/taosmigrate/CMakeLists.txt
@@ -0,0 +1,18 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
+PROJECT(TDengine)
+
+IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
+ INCLUDE_DIRECTORIES(${TD_OS_DIR}/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/util/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/mnode/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/vnode/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/common/inc)
+ INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
+ AUX_SOURCE_DIRECTORY(. SRC)
+
+ ADD_EXECUTABLE(taosmigrate ${SRC})
+ TARGET_LINK_LIBRARIES(taosmigrate common tutil cJson)
+ENDIF ()
+
+SET_SOURCE_FILES_PROPERTIES(./taosmigrate.c PROPERTIES COMPILE_FLAGS -w)
diff --git a/src/kit/taosmigrate/taosmigrate.c b/src/kit/taosmigrate/taosmigrate.c
new file mode 100644
index 0000000000000000000000000000000000000000..b7bf6fc1baecc51757a6425371cc171fcca759e4
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrate.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+
+/* The options we understand. */
+static struct argp_option options[] = {
+ {0, 'r', "data dir", 0, "data dir", 0},
+ {0, 'd', "dnodeId", 0, "dnode id", 1},
+ {0, 'p', "port", 0, "dnode port", 1},
+ {0, 'f', "fqdn", 0, "dnode fqdn", 1},
+ {0, 'g', "multi dnodes", 0, "multi dnode info, e.g. \"2 7030 fqdn1, 3 8030 fqdn2\"", 2},
+ {0}};
+
+/* Used by main to communicate with parse_opt. */
+struct arguments {
+ char* dataDir;
+ int32_t dnodeId;
+ uint16_t port;
+ char* fqdn;
+ char* dnodeGroups;
+ char** arg_list;
+ int arg_list_len;
+};
+
+/* Parse a single option. */
+static error_t parse_opt(int key, char *arg, struct argp_state *state) {
+ struct arguments *arguments = state->input;
+ switch (key) {
+ case 'w':
+ arguments->dataDir = arg;
+ break;
+ case 'd':
+ arguments->dnodeId = atoi(arg);
+ break;
+ case 'p':
+ arguments->port = atoi(arg);
+ break;
+ case 'f':
+ arguments->fqdn = arg;
+ case 'g':
+ arguments->dnodeGroups = arg;
+ break;
+ case ARGP_KEY_ARG:
+ arguments->arg_list = &state->argv[state->next - 1];
+ arguments->arg_list_len = state->argc - state->next + 1;
+ state->next = state->argc;
+
+ argp_usage(state);
+ break;
+
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+static struct argp argp = {options, parse_opt, 0, 0};
+struct arguments arguments = {NULL, 0, 0, NULL, NULL, NULL, 0};
+SdnodeGroup tsDnodeGroup = {0};
+
+int tSystemShell(const char * cmd)
+{
+ FILE * fp;
+ int res;
+ char buf[1024];
+ if (cmd == NULL) {
+ printf("tSystem cmd is NULL!\n");
+ return -1;
+ }
+
+ if ((fp = popen(cmd, "r") ) == NULL) {
+ printf("popen cmd:%s error: %s/n", cmd, strerror(errno));
+ return -1;
+ } else {
+ while(fgets(buf, sizeof(buf), fp)) {
+ printf("popen result:%s", buf);
+ }
+
+ if ((res = pclose(fp)) == -1) {
+ printf("close popen file pointer fp error!\n");
+ } else {
+ printf("popen res is :%d\n", res);
+ }
+
+ return res;
+ }
+}
+
+void taosMvFile(char* destFile, char *srcFile) {
+ char shellCmd[1024+1] = {0};
+
+ //(void)snprintf(shellCmd, 1024, "cp -rf %s %s", srcDir, destDir);
+ (void)snprintf(shellCmd, 1024, "mv -f %s %s", srcFile, destFile);
+ tSystemShell(shellCmd);
+}
+
+SdnodeIfo* getDnodeInfo(int32_t dnodeId)
+{
+ for (int32_t i = 0; i < tsDnodeGroup.dnodeNum; i++) {
+ if (dnodeId == tsDnodeGroup.dnodeArray[i].dnodeId) {
+ return &(tsDnodeGroup.dnodeArray[i]);
+ }
+ }
+
+ return NULL;
+}
+
+void parseOneDnodeInfo(char* buf, SdnodeIfo* pDnodeInfo)
+{
+ char *ptr;
+ char *p;
+ int32_t i = 0;
+ ptr = strtok_r(buf, " ", &p);
+ while(ptr != NULL) {
+ if (0 == i) {
+ pDnodeInfo->dnodeId = atoi(ptr);
+ } else if (1 == i) {
+ pDnodeInfo->port = atoi(ptr);
+ } else if (2 == i) {
+ tstrncpy(pDnodeInfo->fqdn, ptr, TSDB_FQDN_LEN);
+ } else {
+ printf("input parameter error near:%s\n", buf);
+ exit(-1);
+ }
+ i++;
+ ptr = strtok_r(NULL, " ", &p);
+ }
+
+ snprintf(pDnodeInfo->ep, TSDB_EP_LEN, "%s:%d", pDnodeInfo->fqdn, pDnodeInfo->port);
+}
+
+void saveDnodeGroups()
+{
+ if ((NULL != arguments.fqdn) && (arguments.dnodeId > 0) && (0 != arguments.port)) {
+ //printf("dnodeId:%d port:%d fqdn:%s ep:%s\n", arguments.dnodeId, arguments.port, arguments.fqdn, arguments.ep);
+
+ tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].dnodeId = arguments.dnodeId;
+ tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].port = arguments.port;
+ tstrncpy(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].fqdn, arguments.fqdn, TSDB_FQDN_LEN);
+ snprintf(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].ep, TSDB_EP_LEN, "%s:%d", tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].fqdn, tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum].port);
+
+ tsDnodeGroup.dnodeNum++;
+ }
+
+ if (NULL == arguments.dnodeGroups) {
+ return;
+ }
+
+ //printf("dnodeGroups:%s\n", arguments.dnodeGroups);
+
+ char buf[1024];
+ char* str = NULL;
+ char* start = arguments.dnodeGroups;
+ while (NULL != (str = strstr(start, ","))) {
+ memcpy(buf, start, str - start);
+ // parse one dnode info: dnodeId port fqdn ep
+ parseOneDnodeInfo(buf, &(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum]));
+ tsDnodeGroup.dnodeNum++;
+ // next
+ start = str + 1;
+ str = NULL;
+ }
+
+ if (strlen(start)) {
+ parseOneDnodeInfo(start, &(tsDnodeGroup.dnodeArray[tsDnodeGroup.dnodeNum]));
+ tsDnodeGroup.dnodeNum++;
+ }
+}
+
+int32_t main(int32_t argc, char *argv[]) {
+ memset(&tsDnodeGroup, 0, sizeof(SdnodeGroup));
+
+ argp_parse(&argp, argc, argv, 0, 0, &arguments);
+
+ if ((NULL == arguments.dataDir) || ((NULL == arguments.dnodeGroups)
+ && (NULL == arguments.fqdn || arguments.dnodeId < 1 || 0 == arguments.port))) {
+ printf("input parameter error!\n");
+ return -1;
+ }
+
+ saveDnodeGroups();
+
+ printf("===================arguments:==================\n");
+ printf("oldWal:%s\n", arguments.dataDir);
+ for (int32_t i = 0; i < tsDnodeGroup.dnodeNum; i++) {
+ printf("dnodeId:%d port:%d fqdn:%s ep:%s\n", tsDnodeGroup.dnodeArray[i].dnodeId,
+ tsDnodeGroup.dnodeArray[i].port,
+ tsDnodeGroup.dnodeArray[i].fqdn,
+ tsDnodeGroup.dnodeArray[i].ep);
+ }
+ printf("===========================\n");
+
+ // 1. modify wal for mnode
+ char mnodeWal[TSDB_FILENAME_LEN*2] = {0};
+ (void)snprintf(mnodeWal, TSDB_FILENAME_LEN*2, "%s/mnode/wal/wal0", arguments.dataDir);
+ walModWalFile(mnodeWal);
+
+ // 2. modfiy dnode config: mnodeIpList.json
+ char dnodeIpList[TSDB_FILENAME_LEN*2] = {0};
+ (void)snprintf(dnodeIpList, TSDB_FILENAME_LEN*2, "%s/dnode/mnodeIpList.json", arguments.dataDir);
+ modDnodeIpList(dnodeIpList);
+
+ // 3. modify vnode config: config.json
+ char vnodeDir[TSDB_FILENAME_LEN*2] = {0};
+ (void)snprintf(vnodeDir, TSDB_FILENAME_LEN*2, "%s/vnode", arguments.dataDir);
+ modAllVnode(vnodeDir);
+
+ return 0;
+}
+
diff --git a/src/kit/taosmigrate/taosmigrate.h b/src/kit/taosmigrate/taosmigrate.h
new file mode 100644
index 0000000000000000000000000000000000000000..a0a02e651cac94502bd3ccecfac54fc795544cde
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrate.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TAOS_MIGRATE_H
+#define TAOS_MIGRATE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _GNU_SOURCE
+
+#ifndef _ALPINE
+#include <error.h>
+#endif
+
+#include <argp.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "taosdef.h"
+#include "tutil.h"
+#include "twal.h"
+#include "tchecksum.h"
+#include "mnodeDef.h"
+#include "mnodeSdb.h"
+#include "cJSON.h"
+#include "taosmsg.h"
+#include "tglobal.h"
+#include "tsdb.h"
+
+//#include "vnode.h"
+#include "vnodeInt.h"
+
+#define MAX_DNODE_NUM 128
+
+
+typedef struct _SdnodeIfo {
+ int32_t dnodeId;
+ uint16_t port;
+ char fqdn[TSDB_FQDN_LEN+1];
+ char ep[TSDB_EP_LEN+1];
+} SdnodeIfo;
+
+typedef struct _SdnodeGroup {
+ int32_t dnodeNum;
+ SdnodeIfo dnodeArray[MAX_DNODE_NUM];
+} SdnodeGroup;
+
+int tSystemShell(const char * cmd);
+void taosMvFile(char* destFile, char *srcFile) ;
+void walModWalFile(char* walfile);
+SdnodeIfo* getDnodeInfo(int32_t dnodeId);
+void modDnodeIpList(char* dnodeIpList);
+void modAllVnode(char *vnodeDir);
+
+#endif
diff --git a/src/kit/taosmigrate/taosmigrateDnodeCfg.c b/src/kit/taosmigrate/taosmigrateDnodeCfg.c
new file mode 100644
index 0000000000000000000000000000000000000000..263d5521e91deb0a2bb5be66e8988cd4b776b4bd
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrateDnodeCfg.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+//#include "dnodeInt.h"
+//#include "dnodeMgmt.h"
+//#include "dnodeVRead.h"
+//#include "dnodeVWrite.h"
+//#include "dnodeModule.h"
+
+static SDMMnodeInfos tsDnodeIpInfos = {0};
+
+static bool dnodeReadMnodeInfos(char* dnodeIpList) {
+ FILE *fp = fopen(dnodeIpList, "r");
+ if (!fp) {
+ printf("failed to read mnodeIpList.json, file not exist\n");
+ return false;
+ }
+
+ bool ret = false;
+ int maxLen = 2000;
+ char *content = calloc(1, maxLen + 1);
+ int len = fread(content, 1, maxLen, fp);
+ if (len <= 0) {
+ free(content);
+ fclose(fp);
+ printf("failed to read mnodeIpList.json, content is null\n");
+ return false;
+ }
+
+ content[len] = 0;
+ cJSON* root = cJSON_Parse(content);
+ if (root == NULL) {
+ printf("failed to read mnodeIpList.json, invalid json format\n");
+ goto PARSE_OVER;
+ }
+
+ cJSON* inUse = cJSON_GetObjectItem(root, "inUse");
+ if (!inUse || inUse->type != cJSON_Number) {
+ printf("failed to read mnodeIpList.json, inUse not found\n");
+ goto PARSE_OVER;
+ }
+ tsDnodeIpInfos.inUse = inUse->valueint;
+
+ cJSON* nodeNum = cJSON_GetObjectItem(root, "nodeNum");
+ if (!nodeNum || nodeNum->type != cJSON_Number) {
+ printf("failed to read mnodeIpList.json, nodeNum not found\n");
+ goto PARSE_OVER;
+ }
+ tsDnodeIpInfos.nodeNum = nodeNum->valueint;
+
+ cJSON* nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+ if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+ printf("failed to read mnodeIpList.json, nodeInfos not found\n");
+ goto PARSE_OVER;
+ }
+
+ int size = cJSON_GetArraySize(nodeInfos);
+ if (size != tsDnodeIpInfos.nodeNum) {
+ printf("failed to read mnodeIpList.json, nodeInfos size not matched\n");
+ goto PARSE_OVER;
+ }
+
+ for (int i = 0; i < size; ++i) {
+ cJSON* nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+ if (nodeInfo == NULL) continue;
+
+ cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+ if (!nodeId || nodeId->type != cJSON_Number) {
+ printf("failed to read mnodeIpList.json, nodeId not found\n");
+ goto PARSE_OVER;
+ }
+ tsDnodeIpInfos.nodeInfos[i].nodeId = nodeId->valueint;
+
+ cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
+ if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
+ printf("failed to read mnodeIpList.json, nodeName not found\n");
+ goto PARSE_OVER;
+ }
+ strncpy(tsDnodeIpInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
+
+ SdnodeIfo* pDnodeInfo = getDnodeInfo(tsDnodeIpInfos.nodeInfos[i].nodeId);
+ if (NULL == pDnodeInfo) {
+ continue;
+ }
+
+ tstrncpy(tsDnodeIpInfos.nodeInfos[i].nodeEp, pDnodeInfo->ep, TSDB_EP_LEN);
+ }
+
+ ret = true;
+
+ //printf("read mnode iplist successed, numOfIps:%d inUse:%d\n", tsDnodeIpInfos.nodeNum, tsDnodeIpInfos.inUse);
+ //for (int32_t i = 0; i < tsDnodeIpInfos.nodeNum; i++) {
+ // printf("mnode:%d, %s\n", tsDnodeIpInfos.nodeInfos[i].nodeId, tsDnodeIpInfos.nodeInfos[i].nodeEp);
+ //}
+
+PARSE_OVER:
+ free(content);
+ cJSON_Delete(root);
+ fclose(fp);
+ return ret;
+}
+
+
+static void dnodeSaveMnodeInfos(char* dnodeIpList) {
+ FILE *fp = fopen(dnodeIpList, "w");
+ if (!fp) return;
+
+ int32_t len = 0;
+ int32_t maxLen = 2000;
+ char * content = calloc(1, maxLen + 1);
+
+ len += snprintf(content + len, maxLen - len, "{\n");
+ len += snprintf(content + len, maxLen - len, " \"inUse\": %d,\n", tsDnodeIpInfos.inUse);
+ len += snprintf(content + len, maxLen - len, " \"nodeNum\": %d,\n", tsDnodeIpInfos.nodeNum);
+ len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
+ for (int32_t i = 0; i < tsDnodeIpInfos.nodeNum; i++) {
+ len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", tsDnodeIpInfos.nodeInfos[i].nodeId);
+ len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s\"\n", tsDnodeIpInfos.nodeInfos[i].nodeEp);
+ if (i < tsDnodeIpInfos.nodeNum -1) {
+ len += snprintf(content + len, maxLen - len, " },{\n");
+ } else {
+ len += snprintf(content + len, maxLen - len, " }]\n");
+ }
+ }
+ len += snprintf(content + len, maxLen - len, "}\n");
+
+ fwrite(content, 1, len, fp);
+ fflush(fp);
+ fclose(fp);
+ free(content);
+
+ printf("mod mnode iplist successed\n");
+}
+
+void modDnodeIpList(char* dnodeIpList)
+{
+ (void)dnodeReadMnodeInfos(dnodeIpList);
+ dnodeSaveMnodeInfos(dnodeIpList);
+ return;
+}
+
+
diff --git a/src/kit/taosmigrate/taosmigrateMnodeWal.c b/src/kit/taosmigrate/taosmigrateMnodeWal.c
new file mode 100644
index 0000000000000000000000000000000000000000..6315ff99f79cf3414fc147ac2913cc7733069e4b
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrateMnodeWal.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+static void recordWrite(int fd, SWalHead *pHead) {
+
+ taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SWalHead));
+
+ int contLen = pHead->len + sizeof(SWalHead);
+
+ if(write(fd, pHead, contLen) != contLen) {
+ printf("failed to write(%s)", strerror(errno));
+ exit(-1);
+ }
+}
+
+static void recordMod(SWalHead* pHead)
+{
+ SDnodeObj *pDnode;
+
+ ESdbTable tableId = (ESdbTable)(pHead->msgType / 10);
+
+ switch (tableId) {
+ case SDB_TABLE_DNODE:
+ case SDB_TABLE_MNODE:
+ pDnode = (SDnodeObj *)pHead->cont;
+
+ printf("dnodeId:%d port:%d fqdn:%s ep:%s\n", pDnode->dnodeId, pDnode->dnodePort, pDnode->dnodeFqdn, pDnode->dnodeEp);
+
+ SdnodeIfo* pDnodeInfo = getDnodeInfo(pDnode->dnodeId);
+ if (NULL == pDnodeInfo) {
+ break;
+ }
+
+ pDnode->dnodePort = pDnodeInfo->port;
+ tstrncpy(pDnode->dnodeFqdn, pDnodeInfo->fqdn, sizeof(pDnode->dnodeFqdn));
+ tstrncpy(pDnode->dnodeEp, pDnodeInfo->ep, sizeof(pDnode->dnodeEp));
+ break;
+ #if 0
+ case SDB_TABLE_ACCOUNT:
+ SAcctObj *pAcct = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_USER:
+ SUserObj *pUser = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_DB:
+ SDbObj *pDb = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_VGROUP:
+ SVgObj *pVgroup = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_STABLE:
+ SSuperTableObj *pStable = (SDnodeObj *)pHead->cont;
+ break;
+ case SDB_TABLE_CTABLE:
+ SChildTableObj *pCTable = (SDnodeObj *)pHead->cont;
+ break;
+ #endif
+ default:
+ break;
+ }
+}
+
+void walModWalFile(char* walfile) {
+ char *buffer = malloc(1024000); // size for one record
+ if (buffer == NULL) {
+ printf("failed to malloc:%s\n", strerror(errno));
+ return ;
+ }
+
+ SWalHead *pHead = (SWalHead *)buffer;
+
+ int rfd = open(walfile, O_RDONLY);
+ if (rfd < 0) {
+ printf("failed to open %s failed:%s\n", walfile, strerror(errno));
+ free(buffer);
+ return ;
+ }
+
+ char newWalFile[32] = "wal0";
+ int wfd = open(newWalFile, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
+
+ if (wfd < 0) {
+ printf("wal:%s, failed to open(%s)\n", newWalFile, strerror(errno));
+ free(buffer);
+ return ;
+ }
+
+ printf("start to mod %s into %s\n", walfile, newWalFile);
+
+ while (1) {
+ memset(buffer, 0, 1024000);
+ int ret = read(rfd, pHead, sizeof(SWalHead));
+ if ( ret == 0) break;
+
+ if (ret != sizeof(SWalHead)) {
+ printf("wal:%s, failed to read head, skip, ret:%d(%s)\n", walfile, ret, strerror(errno));
+ break;
+ }
+
+ if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) {
+ printf("wal:%s, cksum is messed up, skip the rest of file\n", walfile);
+ break;
+ }
+
+ ret = read(rfd, pHead->cont, pHead->len);
+ if ( ret != pHead->len) {
+ printf("wal:%s, failed to read body, skip, len:%d ret:%d\n", walfile, pHead->len, ret);
+ break;
+ }
+
+ recordMod(pHead);
+ recordWrite(wfd, pHead);
+ }
+
+ close(rfd);
+ close(wfd);
+ free(buffer);
+
+ taosMvFile(walfile, newWalFile);
+
+ return ;
+}
+
+
+
diff --git a/src/kit/taosmigrate/taosmigrateVnodeCfg.c b/src/kit/taosmigrate/taosmigrateVnodeCfg.c
new file mode 100644
index 0000000000000000000000000000000000000000..1cb2fee353d0dd6f59afccfc69272f5a101493f2
--- /dev/null
+++ b/src/kit/taosmigrate/taosmigrateVnodeCfg.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "taosmigrate.h"
+
+
+static int32_t saveVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
+{
+ FILE *fp = fopen(cfgFile, "w");
+ if (!fp) {
+ printf("failed to open vnode cfg file for write, file:%s error:%s\n", cfgFile, strerror(errno));
+ return errno;
+ }
+
+ int32_t len = 0;
+ int32_t maxLen = 1000;
+ char * content = calloc(1, maxLen + 1);
+ if (content == NULL) {
+ fclose(fp);
+ return -1;
+ }
+
+ len += snprintf(content + len, maxLen - len, "{\n");
+ len += snprintf(content + len, maxLen - len, " \"db\": \"%s\",\n", pVnode->db);
+ len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnode->cfgVersion);
+ len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnode->tsdbCfg.cacheBlockSize);
+ len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnode->tsdbCfg.totalBlocks);
+ len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnode->tsdbCfg.maxTables);
+ len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnode->tsdbCfg.daysPerFile);
+ len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnode->tsdbCfg.keep);
+ len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnode->tsdbCfg.keep1);
+ len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnode->tsdbCfg.keep2);
+ len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.minRowsPerFileBlock);
+ len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.maxRowsPerFileBlock);
+ len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnode->tsdbCfg.commitTime);
+ len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnode->tsdbCfg.precision);
+ len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnode->tsdbCfg.compression);
+ len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnode->walCfg.walLevel);
+ len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pVnode->syncCfg.replica);
+ len += snprintf(content + len, maxLen - len, " \"wals\": %d,\n", pVnode->walCfg.wals);
+ len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pVnode->syncCfg.quorum);
+
+ len += snprintf(content + len, maxLen - len, " \"nodeInfos\": [{\n");
+ for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
+ len += snprintf(content + len, maxLen - len, " \"nodeId\": %d,\n", pVnode->syncCfg.nodeInfo[i].nodeId);
+ len += snprintf(content + len, maxLen - len, " \"nodeEp\": \"%s:%d\"\n", pVnode->syncCfg.nodeInfo[i].nodeFqdn, pVnode->syncCfg.nodeInfo[i].nodePort);
+
+ if (i < pVnode->syncCfg.replica - 1) {
+ len += snprintf(content + len, maxLen - len, " },{\n");
+ } else {
+ len += snprintf(content + len, maxLen - len, " }]\n");
+ }
+ }
+ len += snprintf(content + len, maxLen - len, "}\n");
+
+ fwrite(content, 1, len, fp);
+ fflush(fp);
+ fclose(fp);
+ free(content);
+
+ printf("mod vnode cfg %s successed\n", cfgFile);
+
+ return 0;
+}
+
+static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
+{
+ cJSON *root = NULL;
+ char *content = NULL;
+ int maxLen = 1000;
+ int32_t ret = -1;
+
+ FILE *fp = fopen(cfgFile, "r");
+ if (!fp) {
+ printf("failed to open vnode cfg file:%s to read, error:%s\n", cfgFile, strerror(errno));
+ goto PARSE_OVER;
+ }
+
+ content = calloc(1, maxLen + 1);
+ if (content == NULL) {
+ goto PARSE_OVER;
+ }
+
+ int len = fread(content, 1, maxLen, fp);
+ if (len <= 0) {
+ printf("failed to read vnode cfg, content is null, error:%s\n", strerror(errno));
+ goto PARSE_OVER;
+ }
+
+ root = cJSON_Parse(content);
+ if (root == NULL) {
+ printf("failed to json parse %s, invalid json format\n", cfgFile);
+ goto PARSE_OVER;
+ }
+
+ cJSON *db = cJSON_GetObjectItem(root, "db");
+ if (!db || db->type != cJSON_String || db->valuestring == NULL) {
+ printf("vgId:%d, failed to read vnode cfg, db not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ strcpy(pVnode->db, db->valuestring);
+
+ cJSON *cfgVersion = cJSON_GetObjectItem(root, "cfgVersion");
+ if (!cfgVersion || cfgVersion->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, cfgVersion not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->cfgVersion = cfgVersion->valueint;
+
+ cJSON *cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize");
+ if (!cacheBlockSize || cacheBlockSize->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, cacheBlockSize not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.cacheBlockSize = cacheBlockSize->valueint;
+
+ cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks");
+ if (!totalBlocks || totalBlocks->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, totalBlocks not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
+
+ cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
+ if (!maxTables || maxTables->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.maxTables = maxTables->valueint;
+
+ cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
+ if (!daysPerFile || daysPerFile->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysPerFile not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.daysPerFile = daysPerFile->valueint;
+
+ cJSON *daysToKeep = cJSON_GetObjectItem(root, "daysToKeep");
+ if (!daysToKeep || daysToKeep->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysToKeep not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.keep = daysToKeep->valueint;
+
+ cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1");
+ if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysToKeep1 not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.keep1 = daysToKeep1->valueint;
+
+ cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2");
+ if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, daysToKeep2 not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.keep2 = daysToKeep2->valueint;
+
+ cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock");
+ if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, minRowsPerFileBlock not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.minRowsPerFileBlock = minRowsPerFileBlock->valueint;
+
+ cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock");
+ if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, maxRowsPerFileBlock not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
+
+ cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
+ if (!commitTime || commitTime->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+  pVnode->tsdbCfg.commitTime = commitTime->valueint;
+
+ cJSON *precision = cJSON_GetObjectItem(root, "precision");
+ if (!precision || precision->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, precision not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.precision = (int8_t)precision->valueint;
+
+ cJSON *compression = cJSON_GetObjectItem(root, "compression");
+ if (!compression || compression->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, compression not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->tsdbCfg.compression = (int8_t)compression->valueint;
+
+ cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel");
+ if (!walLevel || walLevel->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, walLevel not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->walCfg.walLevel = (int8_t) walLevel->valueint;
+
+ cJSON *wals = cJSON_GetObjectItem(root, "wals");
+ if (!wals || wals->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, wals not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->walCfg.wals = (int8_t)wals->valueint;
+ pVnode->walCfg.keep = 0;
+
+ cJSON *replica = cJSON_GetObjectItem(root, "replica");
+ if (!replica || replica->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, replica not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->syncCfg.replica = (int8_t)replica->valueint;
+
+ cJSON *quorum = cJSON_GetObjectItem(root, "quorum");
+ if (!quorum || quorum->type != cJSON_Number) {
+    printf("vgId:%d, failed to read vnode cfg, quorum not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->syncCfg.quorum = (int8_t)quorum->valueint;
+
+ cJSON *nodeInfos = cJSON_GetObjectItem(root, "nodeInfos");
+ if (!nodeInfos || nodeInfos->type != cJSON_Array) {
+ printf("vgId:%d, failed to read vnode cfg, nodeInfos not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+
+ int size = cJSON_GetArraySize(nodeInfos);
+ if (size != pVnode->syncCfg.replica) {
+ printf("vgId:%d, failed to read vnode cfg, nodeInfos size not matched\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+
+ for (int i = 0; i < size; ++i) {
+ cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
+ if (nodeInfo == NULL) continue;
+
+ cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
+ if (!nodeId || nodeId->type != cJSON_Number) {
+ printf("vgId:%d, failed to read vnode cfg, nodeId not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+ pVnode->syncCfg.nodeInfo[i].nodeId = nodeId->valueint;
+
+ cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp");
+ if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) {
+      printf("vgId:%d, failed to read vnode cfg, nodeEp not found\n", pVnode->vgId);
+ goto PARSE_OVER;
+ }
+
+ taosGetFqdnPortFromEp(nodeEp->valuestring, pVnode->syncCfg.nodeInfo[i].nodeFqdn, &pVnode->syncCfg.nodeInfo[i].nodePort);
+ //pVnode->syncCfg.nodeInfo[i].nodePort += TSDB_PORT_SYNC;
+
+
+ SdnodeIfo* pDnodeInfo = getDnodeInfo(pVnode->syncCfg.nodeInfo[i].nodeId);
+ if (NULL == pDnodeInfo) {
+ continue;
+ }
+
+ pVnode->syncCfg.nodeInfo[i].nodePort = pDnodeInfo->port;
+ tstrncpy(pVnode->syncCfg.nodeInfo[i].nodeFqdn, pDnodeInfo->fqdn, TSDB_FQDN_LEN);
+ }
+
+ ret = 0;
+ //printf("read vnode cfg successfully, replcia:%d\n", pVnode->syncCfg.replica);
+ //for (int32_t i = 0; i < pVnode->syncCfg.replica; i++) {
+ // printf("dnode:%d, %s:%d\n", pVnode->syncCfg.nodeInfo[i].nodeId, pVnode->syncCfg.nodeInfo[i].nodeFqdn, pVnode->syncCfg.nodeInfo[i].nodePort);
+ //}
+
+PARSE_OVER:
+ tfree(content);
+ cJSON_Delete(root);
+ if (fp) fclose(fp);
+ return ret;
+}
+
+static void modVnodeCfg(char* vnodeCfg)
+{
+ int32_t ret;
+ SVnodeObj vnodeObj = {0};
+ ret = readVnodeCfg(&vnodeObj, vnodeCfg);
+ if (0 != ret) {
+ printf("read vnode cfg %s fail!\n", vnodeCfg);
+ return ;
+ }
+
+ (void)saveVnodeCfg(&vnodeObj, vnodeCfg);
+
+ return ;
+}
+
+void modAllVnode(char *vnodeDir)
+{
+ DIR *dir = opendir(vnodeDir);
+ if (dir == NULL) return;
+
+ char filename[1024];
+ struct dirent *de = NULL;
+ while ((de = readdir(dir)) != NULL) {
+ if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue;
+
+    if ((de->d_type == DT_DIR) && (strncmp(de->d_name, "vnode", 5) == 0)) {
+ memset(filename, 0, 1024);
+ snprintf(filename, 1023, "%s/%s/config.json", vnodeDir, de->d_name);
+ modVnodeCfg(filename);
+ }
+ }
+
+ closedir(dir);
+}
+
diff --git a/src/mnode/inc/mnodeDef.h b/src/mnode/inc/mnodeDef.h
index 46d2675705e5b7dc0ee53ba8db8f62a9ad5046df..b40080759f4167f634bff29480b6acb46e685e1f 100644
--- a/src/mnode/inc/mnodeDef.h
+++ b/src/mnode/inc/mnodeDef.h
@@ -135,7 +135,8 @@ typedef struct SVgObj {
char dbName[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN];
int8_t inUse;
int8_t accessState;
- int8_t reserved0[5];
+ int8_t status;
+ int8_t reserved0[4];
SVnodeGid vnodeGid[TSDB_MAX_REPLICA];
int8_t reserved1[7];
int8_t updateEnd[1];
diff --git a/src/mnode/inc/mnodeProfile.h b/src/mnode/inc/mnodeProfile.h
index c9f7cc8e2a94e6f33d55546ab547a5dc95db68f5..e39496ec9cacf431e2fefd737018d54a56e27f88 100644
--- a/src/mnode/inc/mnodeProfile.h
+++ b/src/mnode/inc/mnodeProfile.h
@@ -41,7 +41,7 @@ int32_t mnodeInitProfile();
void mnodeCleanupProfile();
SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port);
-SConnObj *mnodeAccquireConn(uint32_t connId, char *user, uint32_t ip, uint16_t port);
+SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port);
void mnodeReleaseConn(SConnObj *pConn);
int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SCMHeartBeatMsg *pHBMsg);
diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c
index 7e7c12cf088c823137350eb476af79df128139e2..c1b8256a06763414d96e90a11dd62a3530fe9365 100644
--- a/src/mnode/src/mnodeDnode.c
+++ b/src/mnode/src/mnodeDnode.c
@@ -367,7 +367,6 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pAccess++;
mnodeDecVgroupRef(pVgroup);
}
-
}
if (pDnode->status == TAOS_DN_STATUS_OFFLINE) {
diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c
index f3d6a3d344c84cdd03b43300ed0e3e1d416ec50d..af4a09a45a1fc314a648ec528cb4caaf9860df7a 100644
--- a/src/mnode/src/mnodeProfile.c
+++ b/src/mnode/src/mnodeProfile.c
@@ -43,7 +43,7 @@
extern void *tsMnodeTmr;
static SCacheObj *tsMnodeConnCache = NULL;
-static uint32_t tsConnIndex = 0;
+static int32_t tsConnIndex = 0;
static int32_t mnodeGetQueryMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, void *pConn);
@@ -68,7 +68,7 @@ int32_t mnodeInitProfile() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_STREAM, mnodeProcessKillStreamMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_CONN, mnodeProcessKillConnectionMsg);
- tsMnodeConnCache = taosCacheInitWithCb(CONN_CHECK_TIME, mnodeFreeConn);
+ tsMnodeConnCache = taosCacheInit(TSDB_DATA_TYPE_INT, CONN_CHECK_TIME, false, mnodeFreeConn, "conn");
return 0;
}
@@ -89,7 +89,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
return NULL;
}
- uint32_t connId = atomic_add_fetch_32(&tsConnIndex, 1);
+ int32_t connId = atomic_add_fetch_32(&tsConnIndex, 1);
if (connId == 0) atomic_add_fetch_32(&tsConnIndex, 1);
SConnObj connObj = {
@@ -100,9 +100,7 @@ SConnObj *mnodeCreateConn(char *user, uint32_t ip, uint16_t port) {
};
tstrncpy(connObj.user, user, sizeof(connObj.user));
- char key[10];
- sprintf(key, "%u", connId);
- SConnObj *pConn = taosCachePut(tsMnodeConnCache, key, &connObj, sizeof(connObj), CONN_KEEP_TIME);
+ SConnObj *pConn = taosCachePut(tsMnodeConnCache, &connId, sizeof(int32_t), &connObj, sizeof(connObj), CONN_KEEP_TIME);
mDebug("connId:%d, is created, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
return pConn;
@@ -113,12 +111,9 @@ void mnodeReleaseConn(SConnObj *pConn) {
taosCacheRelease(tsMnodeConnCache, (void **)&pConn, false);
}
-SConnObj *mnodeAccquireConn(uint32_t connId, char *user, uint32_t ip, uint16_t port) {
- char key[10];
- sprintf(key, "%u", connId);
+SConnObj *mnodeAccquireConn(int32_t connId, char *user, uint32_t ip, uint16_t port) {
uint64_t expireTime = CONN_KEEP_TIME * 1000 + (uint64_t)taosGetTimestampMs();
-
- SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, key, expireTime);
+ SConnObj *pConn = taosCacheUpdateExpireTimeByName(tsMnodeConnCache, &connId, sizeof(int32_t), expireTime);
if (pConn == NULL) {
mError("connId:%d, is already destroyed, user:%s ip:%s:%u", connId, user, taosIpStr(ip), port);
return NULL;
@@ -547,7 +542,8 @@ static int32_t mnodeProcessKillQueryMsg(SMnodeMsg *pMsg) {
int32_t queryId = (int32_t)strtol(queryIdStr, NULL, 10);
- SConnObj *pConn = taosCacheAcquireByName(tsMnodeConnCache, connIdStr);
+ int32_t connId = atoi(connIdStr);
+ SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t));
if (pConn == NULL) {
mError("connId:%s, failed to kill queryId:%d, conn not exist", connIdStr, queryId);
return TSDB_CODE_MND_INVALID_CONN_ID;
@@ -576,8 +572,9 @@ static int32_t mnodeProcessKillStreamMsg(SMnodeMsg *pMsg) {
}
int32_t streamId = (int32_t)strtol(streamIdStr, NULL, 10);
+ int32_t connId = atoi(connIdStr);
- SConnObj *pConn = taosCacheAcquireByName(tsMnodeConnCache, connIdStr);
+ SConnObj *pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t));
if (pConn == NULL) {
mError("connId:%s, failed to kill streamId:%d, conn not exist", connIdStr, streamId);
return TSDB_CODE_MND_INVALID_CONN_ID;
@@ -594,7 +591,8 @@ static int32_t mnodeProcessKillConnectionMsg(SMnodeMsg *pMsg) {
if (strcmp(pUser->user, TSDB_DEFAULT_USER) != 0) return TSDB_CODE_MND_NO_RIGHTS;
SCMKillConnMsg *pKill = pMsg->rpcMsg.pCont;
- SConnObj * pConn = taosCacheAcquireByName(tsMnodeConnCache, pKill->queryId);
+ int32_t connId = atoi(pKill->queryId);
+ SConnObj * pConn = taosCacheAcquireByKey(tsMnodeConnCache, &connId, sizeof(int32_t));
if (pConn == NULL) {
mError("connId:%s, failed to kill, conn not exist", pKill->queryId);
return TSDB_CODE_MND_INVALID_CONN_ID;
diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c
index 06ef2cb452fb063f642b12c6b3c7c5cf44d1b673..97ffe839142d5018d7256303fb8247d89f1c7400 100644
--- a/src/mnode/src/mnodeShow.c
+++ b/src/mnode/src/mnodeShow.c
@@ -65,7 +65,7 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
- tsMnodeShowCache = taosCacheInitWithCb(5, mnodeFreeShowObj);
+ tsMnodeShowCache = taosCacheInit(TSDB_DATA_TYPE_INT, 5, false, mnodeFreeShowObj, "show");
return 0;
}
@@ -364,10 +364,7 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) {
}
static bool mnodeAccquireShowObj(SShowObj *pShow) {
- char key[10];
- sprintf(key, "%d", pShow->index);
-
- SShowObj *pSaved = taosCacheAcquireByName(tsMnodeShowCache, key);
+ SShowObj *pSaved = taosCacheAcquireByKey(tsMnodeShowCache, &pShow->index, sizeof(int32_t));
if (pSaved == pShow) {
mDebug("%p, show is accquired from cache", pShow);
return true;
@@ -378,14 +375,11 @@ static bool mnodeAccquireShowObj(SShowObj *pShow) {
static void *mnodePutShowObj(SShowObj *pShow, int32_t size) {
if (tsMnodeShowCache != NULL) {
- char key[10];
pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1);
- sprintf(key, "%d", pShow->index);
-
- SShowObj *newQhandle = taosCachePut(tsMnodeShowCache, key, pShow, size, 6);
+ SShowObj *newQhandle = taosCachePut(tsMnodeShowCache, &pShow->index, sizeof(int32_t), pShow, size, 6);
+ mDebug("%p, show is put into cache, index:%d", newQhandle, pShow->index);
free(pShow);
- mDebug("%p, show is put into cache, index:%s", newQhandle, key);
return newQhandle;
}
diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c
index 3855de41014deb0fe33ba09913cf13a62212f9ec..9a041aa4fdd2b50164493d85624c2a61d8e56f6e 100644
--- a/src/mnode/src/mnodeVgroup.c
+++ b/src/mnode/src/mnodeVgroup.c
@@ -38,6 +38,11 @@
#include "mnodeVgroup.h"
#include "mnodePeer.h"
+typedef enum {
+ TAOS_VG_STATUS_READY,
+ TAOS_VG_STATUS_DROPPING
+} EVgroupStatus;
+
static void *tsVgroupSdb = NULL;
static int32_t tsVgUpdateSize = 0;
@@ -279,7 +284,7 @@ void mnodeCheckUnCreatedVgroup(SDnodeObj *pDnode, SVnodeLoad *pVloads, int32_t o
pNextV++;
}
- if (i == openVnodes) {
+ if (i == openVnodes && pVgroup->status == TAOS_VG_STATUS_READY) {
mnodeSendCreateVgroupMsg(pVgroup, NULL);
}
@@ -728,6 +733,7 @@ void mnodeSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
}
static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
+ pVgroup->status = TAOS_VG_STATUS_DROPPING; // deleting
mDebug("vgId:%d, send drop all vnodes msg, ahandle:%p", pVgroup->vgId, ahandle);
for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
SRpcIpSet ipSet = mnodeGetIpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp);
diff --git a/src/os/linux/inc/os.h b/src/os/linux/inc/os.h
index 58e255f7bc95ed0b1bf322554980c0efe432d9e9..00b9f33f1bb3c2443a2ed25097a3a7215c489666 100644
--- a/src/os/linux/inc/os.h
+++ b/src/os/linux/inc/os.h
@@ -86,9 +86,28 @@ extern "C" {
} \
}
+#ifdef TAOS_RANDOM_NETWORK_FAIL
+
+ssize_t taos_send_random_fail(int sockfd, const void *buf, size_t len, int flags);
+
+ssize_t taos_sendto_random_fail(int sockfd, const void *buf, size_t len, int flags,
+ const struct sockaddr *dest_addr, socklen_t addrlen);
+ssize_t taos_read_random_fail(int fd, void *buf, size_t count);
+ssize_t taos_write_random_fail(int fd, const void *buf, size_t count);
+
+#define send(sockfd, buf, len, flags) taos_send_random_fail(sockfd, buf, len, flags)
+#define sendto(sockfd, buf, len, flags, dest_addr, addrlen) \
+ taos_sendto_random_fail(sockfd, buf, len, flags, dest_addr, addrlen)
+#define taosWriteSocket(fd, buf, len) taos_write_random_fail(fd, buf, len)
+#define taosReadSocket(fd, buf, len) taos_read_random_fail(fd, buf, len)
+
+#else
+
#define taosWriteSocket(fd, buf, len) write(fd, buf, len)
#define taosReadSocket(fd, buf, len) read(fd, buf, len)
+#endif /* TAOS_RANDOM_NETWORK_FAIL */
+
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
diff --git a/src/os/linux/src/linuxPlatform.c b/src/os/linux/src/linuxPlatform.c
index 9a38c98f81c36ad779c990fd40ca1d7395ffd3f8..216d8942bcac8953ecfd753658e71f6c2a422f4e 100644
--- a/src/os/linux/src/linuxPlatform.c
+++ b/src/os/linux/src/linuxPlatform.c
@@ -270,3 +270,49 @@ int tSystem(const char * cmd)
}
}
+#ifdef TAOS_RANDOM_NETWORK_FAIL
+
+#define RANDOM_NETWORK_FAIL_FACTOR 20
+
+ssize_t taos_send_random_fail(int sockfd, const void *buf, size_t len, int flags)
+{
+ if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
+ errno = ECONNRESET;
+ return -1;
+ }
+
+ return send(sockfd, buf, len, flags);
+}
+
+ssize_t taos_sendto_random_fail(int sockfd, const void *buf, size_t len, int flags,
+ const struct sockaddr *dest_addr, socklen_t addrlen)
+{
+ if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
+ errno = ECONNRESET;
+ return -1;
+ }
+
+ return sendto(sockfd, buf, len, flags, dest_addr, addrlen);
+}
+
+ssize_t taos_read_random_fail(int fd, void *buf, size_t count)
+{
+ if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
+ errno = ECONNRESET;
+ return -1;
+ }
+
+ return read(fd, buf, count);
+}
+
+ssize_t taos_write_random_fail(int fd, const void *buf, size_t count)
+{
+ if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
+ errno = EINTR;
+ return -1;
+ }
+
+ return write(fd, buf, count);
+}
+
+#endif /* TAOS_RANDOM_NETWORK_FAIL */
diff --git a/src/os/linux/src/linuxSysPara.c b/src/os/linux/src/linuxSysPara.c
index c2134765dfefba5cde330cb6a835e0c7c5261028..1331503619cd6295221bb9107334829d41e96fe3 100644
--- a/src/os/linux/src/linuxSysPara.c
+++ b/src/os/linux/src/linuxSysPara.c
@@ -160,7 +160,7 @@ static void taosGetSystemTimezone() {
/* load time zone string from /etc/timezone */
FILE *f = fopen("/etc/timezone", "r");
- char buf[65] = {0};
+ char buf[68] = {0};
if (f != NULL) {
int len = fread(buf, 64, 1, f);
if(len < 64 && ferror(f)) {
@@ -170,18 +170,17 @@ static void taosGetSystemTimezone() {
}
fclose(f);
- }
- char *lineEnd = strstr(buf, "\n");
- if (lineEnd != NULL) {
- *lineEnd = 0;
- }
+ char *lineEnd = strstr(buf, "\n");
+ if (lineEnd != NULL) {
+ *lineEnd = 0;
+ }
- // for CentOS system, /etc/timezone does not exist. Ignore the TZ environment variables
- if (strlen(buf) > 0) {
- setenv("TZ", buf, 1);
+ // for CentOS system, /etc/timezone does not exist. Ignore the TZ environment variables
+ if (strlen(buf) > 0) {
+ setenv("TZ", buf, 1);
+ }
}
-
// get and set default timezone
tzset();
diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c
index b09f34b562294e13f9ee7e9e728f1ad974f8c9b1..cdaee53c38a480ed5479e2ebf27efc139b677498 100644
--- a/src/plugins/http/src/httpContext.c
+++ b/src/plugins/http/src/httpContext.c
@@ -53,12 +53,12 @@ static void httpDestroyContext(void *data) {
httpFreeJsonBuf(pContext);
httpFreeMultiCmds(pContext);
- httpDebug("context:%p, is destroyed, refCount:%d", pContext, pContext->refCount);
+ httpDebug("context:%p, is destroyed, refCount:%d data:%p", pContext, pContext->refCount, data);
tfree(pContext);
}
bool httpInitContexts() {
- tsHttpServer.contextCache = taosCacheInitWithCb(2, httpDestroyContext);
+ tsHttpServer.contextCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 2, false, httpDestroyContext, "restc");
if (tsHttpServer.contextCache == NULL) {
httpError("failed to init context cache");
return false;
@@ -103,17 +103,14 @@ HttpContext *httpCreateContext(int32_t fd) {
HttpContext *pContext = calloc(1, sizeof(HttpContext));
if (pContext == NULL) return NULL;
- char contextStr[16] = {0};
- snprintf(contextStr, sizeof(contextStr), "%p", pContext);
-
pContext->fd = fd;
pContext->httpVersion = HTTP_VERSION_10;
pContext->lastAccessTime = taosGetTimestampSec();
pContext->state = HTTP_CONTEXT_STATE_READY;
-
- HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, contextStr, &pContext, sizeof(HttpContext *), 3);
+
+ HttpContext **ppContext = taosCachePut(tsHttpServer.contextCache, &pContext, sizeof(void *), &pContext, sizeof(void *), 3);
pContext->ppContext = ppContext;
- httpDebug("context:%p, fd:%d, is created, item:%p", pContext, fd, ppContext);
+ httpDebug("context:%p, fd:%d, is created, data:%p", pContext, fd, ppContext);
// set the ref to 0
taosCacheRelease(tsHttpServer.contextCache, (void**)&ppContext, false);
@@ -122,16 +119,13 @@ HttpContext *httpCreateContext(int32_t fd) {
}
HttpContext *httpGetContext(void *ptr) {
- char contextStr[16] = {0};
- snprintf(contextStr, sizeof(contextStr), "%p", ptr);
-
- HttpContext **ppContext = taosCacheAcquireByName(tsHttpServer.contextCache, contextStr);
-
+ HttpContext **ppContext = taosCacheAcquireByKey(tsHttpServer.contextCache, &ptr, sizeof(HttpContext *));
+
if (ppContext) {
HttpContext *pContext = *ppContext;
if (pContext) {
int32_t refCount = atomic_add_fetch_32(&pContext->refCount, 1);
- httpDebug("context:%p, fd:%d, is accquired, refCount:%d", pContext, pContext->fd, refCount);
+ httpDebug("context:%p, fd:%d, is accquired, data:%p refCount:%d", pContext, pContext->fd, ppContext, refCount);
return pContext;
}
}
@@ -141,9 +135,10 @@ HttpContext *httpGetContext(void *ptr) {
void httpReleaseContext(HttpContext *pContext) {
int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1);
assert(refCount >= 0);
- httpDebug("context:%p, is releasd, refCount:%d", pContext, refCount);
HttpContext **ppContext = pContext->ppContext;
+ httpDebug("context:%p, is releasd, data:%p refCount:%d", pContext, ppContext, refCount);
+
if (tsHttpServer.contextCache != NULL) {
taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
} else {
diff --git a/src/plugins/http/src/httpJson.c b/src/plugins/http/src/httpJson.c
index 9276637d0ee99001d3ec2fc9f50626398a12ee24..82666826bcbdace2c1d5a58c1cda82c89553fe4e 100644
--- a/src/plugins/http/src/httpJson.c
+++ b/src/plugins/http/src/httpJson.c
@@ -441,7 +441,7 @@ void httpJsonPairStatus(JsonBuf* buf, int code) {
} else {
httpJsonPair(buf, "status", 6, "error", 5);
httpJsonItemToken(buf);
- httpJsonPairIntVal(buf, "code", 4, code);
+ httpJsonPairIntVal(buf, "code", 4, code & 0XFFFF);
httpJsonItemToken(buf);
if (code == TSDB_CODE_MND_DB_NOT_SELECTED) {
httpJsonPair(buf, "desc", 4, "failed to create database", 23);
diff --git a/src/plugins/http/src/httpResp.c b/src/plugins/http/src/httpResp.c
index de52e10f9a7d71590ad481770a382d88264c221e..f53aff7831619319c1600cb1f6a7605cb33ed1be 100644
--- a/src/plugins/http/src/httpResp.c
+++ b/src/plugins/http/src/httpResp.c
@@ -174,9 +174,9 @@ void httpSendErrorRespWithDesc(HttpContext *pContext, int errNo, char *desc) {
}
if (desc == NULL) {
- httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo + 1000, httpMsg[errNo]);
+ httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo + 5000, httpMsg[errNo]);
} else {
- httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo + 1000, desc);
+ httpSendErrorRespImp(pContext, httpCode, httpCodeStr, errNo + 5000, desc);
}
}
@@ -184,7 +184,8 @@ void httpSendErrorResp(HttpContext *pContext, int errNo) { httpSendErrorRespWith
void httpSendTaosdErrorResp(HttpContext *pContext, int errCode) {
int httpCode = 400;
- httpSendErrorRespImp(pContext, httpCode, "Bad Request", 1000, (char*)tstrerror(errCode));
+
+ httpSendErrorRespImp(pContext, httpCode, "Bad Request", errCode & 0XFFFF, (char*)tstrerror(errCode));
}
void httpSendTaosdInvalidSqlErrorResp(HttpContext *pContext, char* errMsg) {
@@ -200,7 +201,7 @@ void httpSendTaosdInvalidSqlErrorResp(HttpContext *pContext, char* errMsg) {
} else {}
}
- httpSendErrorRespImp(pContext, httpCode, "Bad Request", 1000, temp);
+ httpSendErrorRespImp(pContext, httpCode, "Bad Request", TSDB_CODE_TSC_INVALID_SQL & 0XFFFF, temp);
}
void httpSendSuccResp(HttpContext *pContext, char *desc) {
diff --git a/src/plugins/http/src/httpServer.c b/src/plugins/http/src/httpServer.c
index 6c82386d8187410544219456368be0b399928207..a5009c2347a4a582984338b8257c015196d7b582 100644
--- a/src/plugins/http/src/httpServer.c
+++ b/src/plugins/http/src/httpServer.c
@@ -85,6 +85,7 @@ bool httpReadDataImp(HttpContext *pContext) {
} else {
httpError("context:%p, fd:%d, ip:%s, read from socket error:%d, close connect",
pContext, pContext->fd, pContext->ipstr, errno);
+ httpReleaseContext(pContext);
return false;
}
} else {
@@ -153,6 +154,7 @@ static bool httpReadData(HttpContext *pContext) {
int ret = httpCheckReadCompleted(pContext);
if (ret == HTTP_CHECK_BODY_CONTINUE) {
//httpDebug("context:%p, fd:%d, ip:%s, not finished yet, wait another event", pContext, pContext->fd, pContext->ipstr);
+ httpReleaseContext(pContext);
return false;
} else if (ret == HTTP_CHECK_BODY_SUCCESS){
httpDebug("context:%p, fd:%d, ip:%s, thread:%s, read size:%d, dataLen:%d",
@@ -161,11 +163,13 @@ static bool httpReadData(HttpContext *pContext) {
return true;
} else {
httpNotifyContextClose(pContext);
+ httpReleaseContext(pContext);
return false;
}
} else {
httpError("context:%p, fd:%d, ip:%s, failed to read http body, close connect", pContext, pContext->fd, pContext->ipstr);
httpNotifyContextClose(pContext);
+ httpReleaseContext(pContext);
return false;
}
}
diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c
index 83602e1291646d2f574b2628569eb5a95fa8e2de..256b0c9549d850fffedf4e12fd715cf4ececaa98 100644
--- a/src/plugins/http/src/httpSession.c
+++ b/src/plugins/http/src/httpSession.c
@@ -33,9 +33,9 @@ void httpCreateSession(HttpContext *pContext, void *taos) {
memset(&session, 0, sizeof(HttpSession));
session.taos = taos;
session.refCount = 1;
- snprintf(session.id, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass);
+ int32_t len = snprintf(session.id, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass);
- pContext->session = taosCachePut(server->sessionCache, session.id, &session, sizeof(HttpSession), tsHttpSessionExpire);
+ pContext->session = taosCachePut(server->sessionCache, session.id, len, &session, sizeof(HttpSession), tsHttpSessionExpire);
// void *temp = pContext->session;
// taosCacheRelease(server->sessionCache, (void **)&temp, false);
@@ -57,9 +57,9 @@ static void httpFetchSessionImp(HttpContext *pContext) {
pthread_mutex_lock(&server->serverMutex);
char sessionId[HTTP_SESSION_ID_LEN];
- snprintf(sessionId, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass);
+ int32_t len = snprintf(sessionId, HTTP_SESSION_ID_LEN, "%s.%s", pContext->user, pContext->pass);
- pContext->session = taosCacheAcquireByName(server->sessionCache, sessionId);
+ pContext->session = taosCacheAcquireByKey(server->sessionCache, sessionId, len);
if (pContext->session != NULL) {
atomic_add_fetch_32(&pContext->session->refCount, 1);
httpDebug("context:%p, fd:%d, ip:%s, user:%s, find an exist session:%p:%p, sessionRef:%d", pContext, pContext->fd,
@@ -115,7 +115,7 @@ void httpCleanUpSessions() {
}
bool httpInitSessions() {
- tsHttpServer.sessionCache = taosCacheInitWithCb(5, httpDestroySession);
+ tsHttpServer.sessionCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession, "rests");
if (tsHttpServer.sessionCache == NULL) {
httpError("failed to init session cache");
return false;
diff --git a/src/plugins/mqtt/src/mqttSystem.c b/src/plugins/mqtt/src/mqttSystem.c
index 3df62f8bf4f362c8523503e1d27548cee336b9b5..2687106124497607a50c8b1435497cd2c467722a 100644
--- a/src/plugins/mqtt/src/mqttSystem.c
+++ b/src/plugins/mqtt/src/mqttSystem.c
@@ -111,7 +111,7 @@ void mqttStopSystem() {
}
void mqttCleanUpSystem() {
- mqttInfo("starting to clean up mqtt");
+ mqttInfo("starting to cleanup mqtt");
free(recntStatus.user_name);
free(recntStatus.password);
free(recntStatus.hostname);
diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h
index a83072384595d96fc60cc57ff35b1502c4b83b49..3aa1b60be576e9b0d3559d0f011c171a9af33a16 100644
--- a/src/query/inc/qExecutor.h
+++ b/src/query/inc/qExecutor.h
@@ -27,6 +27,7 @@
#include "tref.h"
#include "tsdb.h"
#include "tsqlfunction.h"
+#include "query.h"
struct SColumnFilterElem;
typedef bool (*__filter_func_t)(struct SColumnFilterElem* pFilter, char* val1, char* val2);
@@ -94,16 +95,13 @@ typedef struct SSingleColumnFilterInfo {
} SSingleColumnFilterInfo;
typedef struct STableQueryInfo { // todo merge with the STableQueryInfo struct
- int32_t tableIndex;
- int32_t groupIndex; // group id in table list
TSKEY lastKey;
- int32_t numOfRes;
+ int32_t groupIndex; // group id in table list
int16_t queryRangeSet; // denote if the query range is set, only available for interval query
int64_t tag;
STimeWindow win;
STSCursor cur;
- void* pTable; // for retrieve the page id list
-
+ void* pTable; // for retrieve the page id list
SWindowResInfo windowResInfo;
} STableQueryInfo;
@@ -126,11 +124,6 @@ typedef struct SQueryCostInfo {
uint64_t computTime;
} SQueryCostInfo;
-//typedef struct SGroupItem {
-// void *pTable;
-// STableQueryInfo *info;
-//} SGroupItem;
-
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
@@ -172,22 +165,22 @@ typedef struct SQueryRuntimeEnv {
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostInfo summary;
- bool stableQuery; // super table query or not
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
- SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ bool stableQuery; // super table query or not
bool topBotQuery; // false
int32_t prevGroupId; // previous executed group id
+ SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
} SQueryRuntimeEnv;
typedef struct SQInfo {
- void* signature;
- int32_t pointsInterpo;
- int32_t code; // error code to returned to client
- sem_t dataReady;
- void* tsdb;
- int32_t vgId;
-
+ void* signature;
+ int32_t pointsInterpo;
+ int32_t code; // error code to returned to client
+ sem_t dataReady;
+ void* tsdb;
+ void* param;
+ int32_t vgId;
STableGroupInfo tableGroupInfo; // table id list < only includes the STable list>
STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure
SQueryRuntimeEnv runtimeEnv;
@@ -202,8 +195,10 @@ typedef struct SQInfo {
* We later may refactor to remove this attribution by using another flag to denote
* whether a multimeter query is completed or not.
*/
- int32_t tableIndex;
- int32_t numOfGroupResultPages;
+ int32_t tableIndex;
+ int32_t numOfGroupResultPages;
+ _qinfo_free_fn_t freeFn;
+ jmp_buf env;
} SQInfo;
#endif // TDENGINE_QUERYEXECUTOR_H
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 5a17e4e1e842b785538546ab0ee149f9b6e5dcc6..55cb35fdf907964bb649769d430e9673de59d79b 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -13,7 +13,10 @@
* along with this program. If not, see .
*/
#include "os.h"
+#include "tcache.h"
+#include "tglobal.h"
#include "qfill.h"
+#include "taosmsg.h"
#include "hash.h"
#include "qExecutor.h"
@@ -22,11 +25,11 @@
#include "qresultBuf.h"
#include "query.h"
#include "queryLog.h"
-#include "taosmsg.h"
#include "tlosertree.h"
-#include "tscUtil.h" // todo move the function to common module
+#include "exception.h"
#include "tscompression.h"
#include "ttime.h"
+#include "tfile.h"
/**
* check if the primary column is load by default, otherwise, the program will
@@ -87,6 +90,19 @@ typedef struct {
STSCursor cur;
} SQueryStatusInfo;
+#if 0
+static UNUSED_FUNC void *u_malloc (size_t __size) {
+ uint32_t v = rand();
+ if (v % 5 <= 1) {
+ return NULL;
+ } else {
+ return malloc(__size);
+ }
+}
+
+#define malloc u_malloc
+#endif
+
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList)
#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
@@ -1509,7 +1525,6 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
}
static bool isQueryKilled(SQInfo *pQInfo) {
- return false;
return (pQInfo->code == TSDB_CODE_TSC_QUERY_CANCELLED);
}
@@ -2586,7 +2601,6 @@ void copyResToQueryResultBuf(SQInfo *pQInfo, SQuery *pQuery) {
}
int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
-// int64_t maxOutput = 0;
for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
int32_t functionId = pQuery->pSelectExpr[j].base.functionId;
@@ -2604,15 +2618,6 @@ int64_t getNumOfResultWindowRes(SQuery *pQuery, SWindowResult *pWindowRes) {
if (pResultInfo->numOfRes > 0) {
return pResultInfo->numOfRes;
}
-// if (pResultInfo != NULL && maxOutput < pResultInfo->numOfRes) {
-// maxOutput = pResultInfo->numOfRes;
-//
-// if (maxOutput > 0) {
-// break;
-// }
-// }
-//
-// assert(pResultInfo != NULL);
}
return 0;
@@ -2623,12 +2628,19 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) {
SQuery * pQuery = pRuntimeEnv->pQuery;
size_t size = taosArrayGetSize(pGroup);
-
tFilePage **buffer = pQuery->sdata;
- int32_t * posList = calloc(size, sizeof(int32_t));
+ int32_t* posList = calloc(size, sizeof(int32_t));
STableQueryInfo **pTableList = malloc(POINTER_BYTES * size);
+ if (pTableList == NULL || posList == NULL) {
+ tfree(posList);
+ tfree(pTableList);
+
+ qError("QInfo:%p failed alloc memory", pQInfo);
+ longjmp(pQInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
// todo opt for the case of one table per group
int32_t numOfTables = 0;
for (int32_t i = 0; i < size; ++i) {
@@ -4069,7 +4081,7 @@ static SFillColInfo* taosCreateFillColInfo(SQuery* pQuery) {
return pFillCol;
}
-int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool isSTableQuery) {
+int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bool isSTableQuery) {
int32_t code = TSDB_CODE_SUCCESS;
SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
@@ -4085,12 +4097,12 @@ int32_t doInitQInfo(SQInfo *pQInfo, void *param, void *tsdb, int32_t vgId, bool
pQInfo->vgId = vgId;
pRuntimeEnv->pQuery = pQuery;
- pRuntimeEnv->pTSBuf = param;
+ pRuntimeEnv->pTSBuf = pTsBuf;
pRuntimeEnv->cur.vgroupIndex = -1;
pRuntimeEnv->stableQuery = isSTableQuery;
pRuntimeEnv->prevGroupId = INT32_MIN;
- if (param != NULL) {
+ if (pTsBuf != NULL) {
int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC;
tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order);
}
@@ -4331,7 +4343,9 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
taosArrayDestroy(s);
// here we simply set the first table as current table
- pQuery->current = (STableQueryInfo*) GET_TABLEGROUP(pQInfo, 0);
+ SArray* first = GET_TABLEGROUP(pQInfo, pQInfo->groupIndex);
+ pQuery->current = taosArrayGetP(first, 0);
+
scanOneTableDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
int64_t numOfRes = getNumOfResult(pRuntimeEnv);
@@ -4930,14 +4944,6 @@ static void tableQueryImpl(SQInfo *pQInfo) {
// record the total elapsed time
pRuntimeEnv->summary.elapsedTime += (taosGetTimestampUs() - st);
assert(pQInfo->tableqinfoGroupInfo.numOfTables == 1);
-
- /* check if query is killed or not */
- if (isQueryKilled(pQInfo)) {
- qDebug("QInfo:%p query is killed", pQInfo);
- } else {
- qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
- pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
- }
}
static void stableQueryImpl(SQInfo *pQInfo) {
@@ -4959,10 +4965,6 @@ static void stableQueryImpl(SQInfo *pQInfo) {
// record the total elapsed time
pQInfo->runtimeEnv.summary.elapsedTime += (taosGetTimestampUs() - st);
-
- if (pQuery->rec.rows == 0) {
- qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total);
- }
}
static int32_t getColumnIndexInSource(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SColumnInfo* pTagCols) {
@@ -5074,6 +5076,8 @@ static char *createTableIdList(SQueryTableMsg *pQueryMsg, char *pMsg, SArray **p
*/
static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, SSqlFuncMsg ***pExpr,
char **tagCond, char** tbnameCond, SColIndex **groupbyCols, SColumnInfo** tagCols) {
+ int32_t code = TSDB_CODE_SUCCESS;
+
pQueryMsg->numOfTables = htonl(pQueryMsg->numOfTables);
pQueryMsg->window.skey = htobe64(pQueryMsg->window.skey);
@@ -5100,7 +5104,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
// query msg safety check
if (!validateQueryMsg(pQueryMsg)) {
- return TSDB_CODE_QRY_INVALID_MSG;
+ code = TSDB_CODE_QRY_INVALID_MSG;
+ goto _cleanup;
}
char *pMsg = (char *)(pQueryMsg->colList) + sizeof(SColumnInfo) * pQueryMsg->numOfCols;
@@ -5172,7 +5177,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
int16_t functionId = pExprMsg->functionId;
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ || functionId == TSDB_FUNC_TAG_DUMMY) {
if (pExprMsg->colInfo.flag != TSDB_COL_TAG) { // ignore the column index check for arithmetic expression.
- return TSDB_CODE_QRY_INVALID_MSG;
+ code = TSDB_CODE_QRY_INVALID_MSG;
+ goto _cleanup;
}
} else {
// if (!validateExprColumnInfo(pQueryMsg, pExprMsg)) {
@@ -5184,6 +5190,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
}
if (!validateQuerySourceCols(pQueryMsg, *pExpr)) {
+ code = TSDB_CODE_QRY_INVALID_MSG;
goto _cleanup;
}
@@ -5191,6 +5198,10 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
if (pQueryMsg->numOfGroupCols > 0) { // group by tag columns
*groupbyCols = malloc(pQueryMsg->numOfGroupCols * sizeof(SColIndex));
+ if (*groupbyCols == NULL) {
+ code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ goto _cleanup;
+ }
for (int32_t i = 0; i < pQueryMsg->numOfGroupCols; ++i) {
(*groupbyCols)[i].colId = *(int16_t *)pMsg;
@@ -5246,7 +5257,13 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
if (*pMsg != 0) {
size_t len = strlen(pMsg) + 1;
+
*tbnameCond = malloc(len);
+ if (*tbnameCond == NULL) {
+ code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ goto _cleanup;
+ }
+
strcpy(*tbnameCond, pMsg);
pMsg += len;
}
@@ -5256,7 +5273,8 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList,
pQueryMsg, pQueryMsg->numOfTables, pQueryMsg->queryType, pQueryMsg->window.skey, pQueryMsg->window.ekey, pQueryMsg->numOfGroupCols,
pQueryMsg->order, pQueryMsg->numOfOutput, pQueryMsg->numOfCols, pQueryMsg->intervalTime,
pQueryMsg->fillType, pQueryMsg->tsLen, pQueryMsg->tsNumOfBlocks, pQueryMsg->limit, pQueryMsg->offset);
- return 0;
+
+ return TSDB_CODE_SUCCESS;
_cleanup:
tfree(*pExpr);
@@ -5266,7 +5284,8 @@ _cleanup:
tfree(*groupbyCols);
tfree(*tagCols);
tfree(*tagCond);
- return TSDB_CODE_QRY_INVALID_MSG;
+
+ return code;
}
static int32_t buildAirthmeticExprFromMsg(SExprInfo *pArithExprInfo, SQueryTableMsg *pQueryMsg) {
@@ -5654,7 +5673,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
STableQueryInfo* item = createTableQueryInfo(&pQInfo->runtimeEnv, pTable, window);
item->groupIndex = i;
- item->tableIndex = tableIndex++;
taosArrayPush(p1, &item);
taosHashPut(pQInfo->tableqinfoGroupInfo.map, &id.tid, sizeof(id.tid), &item, POINTER_BYTES);
}
@@ -5668,7 +5686,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
pQuery->window = pQueryMsg->window;
if (sem_init(&pQInfo->dataReady, 0, 0) != 0) {
- qError("QInfo:%p init dataReady sem failed, reason:%s", pQInfo, strerror(errno));
+ int32_t code = TAOS_SYSTEM_ERROR(errno);
+ qError("QInfo:%p init dataReady sem failed, reason:%s", pQInfo, tstrerror(code));
goto _cleanup;
}
@@ -5679,7 +5698,6 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SArray* pTableIdList,
_cleanup:
freeQInfo(pQInfo);
-
return NULL;
}
@@ -5697,8 +5715,7 @@ static bool isValidQInfo(void *param) {
return (sig == (uint64_t)pQInfo);
}
-
-static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable) {
+static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQInfo *pQInfo, bool isSTable, void* param, _qinfo_free_fn_t fn) {
int32_t code = TSDB_CODE_SUCCESS;
SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
@@ -5722,6 +5739,9 @@ static int32_t initQInfo(SQueryTableMsg *pQueryMsg, void *tsdb, int32_t vgId, SQ
return TSDB_CODE_SUCCESS;
}
+ pQInfo->param = param;
+ pQInfo->freeFn = fn;
+
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo);
setQueryStatus(pQuery, QUERY_COMPLETED);
@@ -5785,7 +5805,7 @@ static void freeQInfo(SQInfo *pQInfo) {
// todo refactor, extract method to destroytableDataInfo
int32_t numOfGroups = GET_NUM_OF_TABLEGROUP(pQInfo);
for (int32_t i = 0; i < numOfGroups; ++i) {
- SArray *p = GET_TABLEGROUP(pQInfo, i);;
+ SArray *p = GET_TABLEGROUP(pQInfo, i);
size_t num = taosArrayGetSize(p);
for(int32_t j = 0; j < num; ++j) {
@@ -5894,8 +5914,16 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) {
return TSDB_CODE_SUCCESS;
}
-int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qinfo_t *pQInfo) {
- assert(pQueryMsg != NULL);
+typedef struct SQueryMgmt {
+ SCacheObj *qinfoPool; // query handle pool
+ int32_t vgId;
+ bool closed;
+ pthread_mutex_t lock;
+} SQueryMgmt;
+
+int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, void* param, _qinfo_free_fn_t fn,
+ qinfo_t* pQInfo) {
+ assert(pQueryMsg != NULL && tsdb != NULL);
int32_t code = TSDB_CODE_SUCCESS;
@@ -5984,7 +6012,7 @@ int32_t qCreateQueryInfo(void *tsdb, int32_t vgId, SQueryTableMsg *pQueryMsg, qi
goto _over;
}
- code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery);
+ code = initQInfo(pQueryMsg, tsdb, vgId, *pQInfo, isSTableQuery, param, fn);
_over:
free(tagCond);
@@ -6020,7 +6048,7 @@ static void doDestoryQueryInfo(SQInfo* pQInfo) {
freeQInfo(pQInfo);
}
-void qDestroyQueryInfo(qinfo_t qHandle, void (*fp)(void*), void* param) {
+void qDestroyQueryInfo(qinfo_t qHandle) {
SQInfo* pQInfo = (SQInfo*) qHandle;
if (!isValidQInfo(pQInfo)) {
return;
@@ -6030,15 +6058,19 @@ void qDestroyQueryInfo(qinfo_t qHandle, void (*fp)(void*), void* param) {
qDebug("QInfo:%p dec refCount, value:%d", pQInfo, ref);
if (ref == 0) {
- doDestoryQueryInfo(pQInfo);
+ _qinfo_free_fn_t freeFp = pQInfo->freeFn;
+ void* param = pQInfo->param;
- if (fp != NULL) {
- fp(param);
+ doDestoryQueryInfo(pQInfo);
+ if (freeFp != NULL) {
+ assert(param != NULL);
+ freeFp(param);
}
+
}
}
-void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
+void qTableQuery(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
if (pQInfo == NULL || pQInfo->signature != pQInfo) {
@@ -6048,17 +6080,34 @@ void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
if (isQueryKilled(pQInfo)) {
qDebug("QInfo:%p it is already killed, abort", pQInfo);
- qDestroyQueryInfo(pQInfo, fp, param);
+
+ sem_post(&pQInfo->dataReady);
+ qDestroyQueryInfo(pQInfo);
return;
}
if (pQInfo->tableqinfoGroupInfo.numOfTables == 0) {
qDebug("QInfo:%p no table exists for query, abort", pQInfo);
+
+ sem_post(&pQInfo->dataReady);
+ qDestroyQueryInfo(pQInfo);
+ return;
+ }
+
+ int32_t ret = setjmp(pQInfo->env);
+ // error occurs, record the error code and return to client
+ if (ret != TSDB_CODE_SUCCESS) {
+ pQInfo->code = ret;
+ qDebug("QInfo:%p query abort due to error occurs, code:%s", pQInfo, tstrerror(pQInfo->code));
+ sem_post(&pQInfo->dataReady);
+ qDestroyQueryInfo(pQInfo);
+
return;
}
qDebug("QInfo:%p query task is launched", pQInfo);
+ SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv;
if (onlyQueryTags(pQInfo->runtimeEnv.pQuery)) {
assert(pQInfo->runtimeEnv.pQueryHandle == NULL);
buildTagQueryResult(pQInfo); // todo support the limit/offset
@@ -6068,8 +6117,18 @@ void qTableQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
tableQueryImpl(pQInfo);
}
+ SQuery* pQuery = pRuntimeEnv->pQuery;
+ if (isQueryKilled(pQInfo)) {
+ qDebug("QInfo:%p query is killed", pQInfo);
+ } else if (pQuery->rec.rows == 0) {
+ qDebug("QInfo:%p over, %zu tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total);
+ } else {
+ qDebug("QInfo:%p query paused, %" PRId64 " rows returned, numOfTotal:%" PRId64 " rows",
+ pQInfo, pQuery->rec.rows, pQuery->rec.total + pQuery->rec.rows);
+ }
+
sem_post(&pQInfo->dataReady);
- qDestroyQueryInfo(pQInfo, fp, param);
+ qDestroyQueryInfo(pQInfo);
}
int32_t qRetrieveQueryResultInfo(qinfo_t qinfo) {
@@ -6162,7 +6221,7 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co
return code;
}
-int32_t qKillQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
+int32_t qKillQuery(qinfo_t qinfo) {
SQInfo *pQInfo = (SQInfo *)qinfo;
if (pQInfo == NULL || !isValidQInfo(pQInfo)) {
@@ -6170,8 +6229,7 @@ int32_t qKillQuery(qinfo_t qinfo, void (*fp)(void*), void* param) {
}
setQueryKilled(pQInfo);
- qDestroyQueryInfo(pQInfo, fp, param);
-
+ qDestroyQueryInfo(pQInfo);
return TSDB_CODE_SUCCESS;
}
@@ -6309,3 +6367,112 @@ static void buildTagQueryResult(SQInfo* pQInfo) {
setQueryStatus(pQuery, QUERY_COMPLETED);
}
+void freeqinfoFn(void *qhandle) {
+ void** handle = qhandle;
+ if (handle == NULL || *handle == NULL) {
+ return;
+ }
+
+ qKillQuery(*handle);
+}
+
+void* qOpenQueryMgmt(int32_t vgId) {
+ const int32_t REFRESH_HANDLE_INTERVAL = 2; // every 2 seconds, refresh handle pool
+
+ char cacheName[128] = {0};
+ snprintf(cacheName, sizeof(cacheName), "qhandle_%d", vgId);
+
+ SQueryMgmt* pQueryHandle = calloc(1, sizeof(SQueryMgmt));
+
+ pQueryHandle->qinfoPool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, freeqinfoFn, cacheName);
+ pQueryHandle->vgId = vgId; // calloc already zeroed 'closed'; record vgId so later qDebug logs show the real vgroup
+ pthread_mutex_init(&pQueryHandle->lock, NULL);
+
+ qDebug("vgId:%d, open querymgmt success", vgId);
+ return pQueryHandle;
+}
+
+void qSetQueryMgmtClosed(void* pQMgmt) {
+ if (pQMgmt == NULL) {
+ return;
+ }
+
+ SQueryMgmt* pQueryMgmt = pQMgmt;
+ qDebug("vgId:%d, set querymgmt closed, wait for all queries cancelled", pQueryMgmt->vgId);
+
+ pthread_mutex_lock(&pQueryMgmt->lock);
+ pQueryMgmt->closed = true;
+ pthread_mutex_unlock(&pQueryMgmt->lock);
+
+ taosCacheEmpty(pQueryMgmt->qinfoPool, true);
+}
+
+void qCleanupQueryMgmt(void* pQMgmt) {
+ if (pQMgmt == NULL) {
+ return;
+ }
+
+ SQueryMgmt* pQueryMgmt = pQMgmt;
+ int32_t vgId = pQueryMgmt->vgId;
+
+ assert(pQueryMgmt->closed);
+
+ SCacheObj* pqinfoPool = pQueryMgmt->qinfoPool;
+ pQueryMgmt->qinfoPool = NULL;
+
+ taosCacheCleanup(pqinfoPool);
+ pthread_mutex_destroy(&pQueryMgmt->lock);
+ tfree(pQueryMgmt);
+
+ qDebug("vgId:%d querymgmt cleanup completed", vgId);
+}
+
+void** qRegisterQInfo(void* pMgmt, void* qInfo) {
+ if (pMgmt == NULL) {
+ return NULL;
+ }
+
+ SQueryMgmt *pQueryMgmt = pMgmt;
+ if (pQueryMgmt->qinfoPool == NULL) {
+ return NULL;
+ }
+
+ pthread_mutex_lock(&pQueryMgmt->lock);
+ if (pQueryMgmt->closed) {
+ pthread_mutex_unlock(&pQueryMgmt->lock);
+
+ return NULL;
+ } else {
+ void** handle = taosCachePut(pQueryMgmt->qinfoPool, qInfo, POINTER_BYTES, &qInfo, POINTER_BYTES, tsShellActivityTimer*2);
+ pthread_mutex_unlock(&pQueryMgmt->lock);
+
+ return handle;
+ }
+}
+
+void** qAcquireQInfo(void* pMgmt, void** key) {
+ SQueryMgmt *pQueryMgmt = pMgmt;
+
+ if (pQueryMgmt->qinfoPool == NULL || pQueryMgmt->closed) {
+ return NULL;
+ }
+
+ void** handle = taosCacheAcquireByKey(pQueryMgmt->qinfoPool, key, POINTER_BYTES);
+ if (handle == NULL || *handle == NULL) {
+ return NULL;
+ } else {
+ return handle;
+ }
+}
+
+void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree) {
+ SQueryMgmt *pQueryMgmt = pMgmt;
+
+ if (pQueryMgmt->qinfoPool == NULL) {
+ return NULL;
+ }
+
+ taosCacheRelease(pQueryMgmt->qinfoPool, pQInfo, needFree);
+ return NULL;
+}
+
diff --git a/src/query/src/qast.c b/src/query/src/qast.c
index dc3b1499bbe02fd389cb1502361fd5cdc46bc54e..721cd8ae5a2fec6233faf84f71ecf08378ab19d9 100644
--- a/src/query/src/qast.c
+++ b/src/query/src/qast.c
@@ -1173,9 +1173,7 @@ tExprNode* exprTreeFromTableName(const char* tbnameCond) {
size_t len = strlen(cond) + VARSTR_HEADER_SIZE;
char* p = exception_malloc(len);
- varDataSetLen(p, len - VARSTR_HEADER_SIZE);
- memcpy(varDataVal(p), cond, len);
-
+ STR_WITH_SIZE_TO_VARSTR(p, cond, len - VARSTR_HEADER_SIZE);
taosArrayPush(pVal->arr, &p);
}
diff --git a/src/query/src/qparserImpl.c b/src/query/src/qparserImpl.c
index 928b9eb8738e5c3ee299ed595fb163182c76e5a8..d4ac540d2fc725488bae83fb8890840f6ddb59a9 100644
--- a/src/query/src/qparserImpl.c
+++ b/src/query/src/qparserImpl.c
@@ -15,16 +15,16 @@
#include "os.h"
#include "qsqlparser.h"
+#include "queryLog.h"
#include "taosdef.h"
#include "taosmsg.h"
+#include "tcmdtype.h"
#include "tglobal.h"
#include "tstoken.h"
+#include "tstrbuild.h"
#include "ttime.h"
#include "ttokendef.h"
#include "tutil.h"
-#include "qsqltype.h"
-#include "tstrbuild.h"
-#include "queryLog.h"
SSqlInfo qSQLParse(const char *pStr) {
void *pParser = ParseAlloc(malloc);
diff --git a/src/query/src/sql.c b/src/query/src/sql.c
index e75802a98f4e94c23034072d908b8b5ece3acd7a..eafb052593b121047e3388b7ead3e754ffd3e55d 100644
--- a/src/query/src/sql.c
+++ b/src/query/src/sql.c
@@ -25,17 +25,17 @@
#include
/************ Begin %include sections from the grammar ************************/
+#include
+#include
#include
#include
#include
-#include
-#include
-#include "tutil.h"
#include "qsqlparser.h"
+#include "tcmdtype.h"
#include "tstoken.h"
-#include "tvariant.h"
#include "ttokendef.h"
-#include "qsqltype.h"
+#include "tutil.h"
+#include "tvariant.h"
/**************** End of %include directives **********************************/
/* These constants specify the various numeric values for terminal symbols
** in a format understandable to "makeheaders". This section is blank unless
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index facfb79352470a05bf323ea75d1003c25be1587e..c05c8c76e16fc96d5a513608e26fcd24956bc7b6 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -73,6 +73,7 @@ typedef struct {
SRpcInfo *pRpc; // associated SRpcInfo
SRpcIpSet ipSet; // ip list provided by app
void *ahandle; // handle provided by app
+ void *signature; // for validation
struct SRpcConn *pConn; // pConn allocated
char msgType; // message type
uint8_t *pCont; // content provided by app
@@ -361,6 +362,7 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen);
pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
pContext->ahandle = pMsg->ahandle;
+ pContext->signature = pContext;
pContext->pRpc = (SRpcInfo *)shandle;
pContext->ipSet = *pIpSet;
pContext->contLen = contLen;
@@ -527,11 +529,13 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) {
return code;
}
-/* todo: cancel process may have race condition, pContext may have been released
- just before app calls the rpcCancelRequest */
void rpcCancelRequest(void *handle) {
SRpcReqContext *pContext = handle;
+ // signature is used to check if pContext is freed.
+ // pContext may have been released just before app calls the rpcCancelRequest
+ if (pContext->signature != pContext) return;
+
if (pContext->pConn) {
tDebug("%s, app trys to cancel request", pContext->pConn->info);
rpcCloseConn(pContext->pConn);
@@ -1005,6 +1009,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
SRpcInfo *pRpc = pContext->pRpc;
+ pContext->signature = NULL;
pContext->pConn = NULL;
if (pContext->pRsp) {
// for synchronous API
@@ -1529,10 +1534,10 @@ static void rpcAddRef(SRpcInfo *pRpc)
static void rpcDecRef(SRpcInfo *pRpc)
{
if (atomic_sub_fetch_32(&pRpc->refCount, 1) == 0) {
+ rpcCloseConnCache(pRpc->pCache);
taosHashCleanup(pRpc->hash);
taosTmrCleanUp(pRpc->tmrCtrl);
taosIdPoolCleanUp(pRpc->idPool);
- rpcCloseConnCache(pRpc->pCache);
tfree(pRpc->connList);
pthread_mutex_destroy(&pRpc->mutex);
diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c
index 33eae639b8f5152f05c194a60838c14d5b3ec5d0..95cc47292b1c24735fb564c7a6091fdeba801868 100644
--- a/src/tsdb/src/tsdbFile.c
+++ b/src/tsdb/src/tsdbFile.c
@@ -28,6 +28,7 @@
#include "tsdbMain.h"
#include "tutil.h"
#include "ttime.h"
+#include "tfile.h"
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index e650cef45cf14a351ff7986985df43b4d1112b71..4b9e977a1b741c6998b3c6bca8b7e7ec2cae6a27 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -415,11 +415,11 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
// Check maxTables
if (pCfg->maxTables == -1) {
- pCfg->maxTables = TSDB_DEFAULT_TABLES;
+ pCfg->maxTables = TSDB_DEFAULT_TABLES+1;
} else {
- if (pCfg->maxTables < TSDB_MIN_TABLES || pCfg->maxTables > TSDB_MAX_TABLES) {
+ if (pCfg->maxTables - 1 < TSDB_MIN_TABLES || pCfg->maxTables - 1 > TSDB_MAX_TABLES) {
tsdbError("vgId:%d invalid maxTables configuration! maxTables %d TSDB_MIN_TABLES %d TSDB_MAX_TABLES %d",
- pCfg->tsdbId, pCfg->maxTables, TSDB_MIN_TABLES, TSDB_MAX_TABLES);
+ pCfg->tsdbId, pCfg->maxTables - 1, TSDB_MIN_TABLES, TSDB_MAX_TABLES);
goto _err;
}
}
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index dafc7dbb1bcb2eab6fa5f8f6587cc322879512d1..cbbf51d862686c1b36ed36b91af5528016276ea6 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -255,17 +255,46 @@ _err:
return NULL;
}
+static int32_t colIdCompar(const void* left, const void* right) {
+ int16_t colId = *(int16_t*) left;
+ STColumn* p2 = (STColumn*) right;
+
+ if (colId == p2->colId) {
+ return 0;
+ }
+
+ return (colId < p2->colId)? -1:1;
+}
+
int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
STsdbRepo *pRepo = (STsdbRepo *)repo;
STsdbMeta *pMeta = pRepo->tsdbMeta;
- int16_t tversion = htons(pMsg->tversion);
- STable *pTable = tsdbGetTableByUid(pMeta, htobe64(pMsg->uid));
+ pMsg->uid = htobe64(pMsg->uid);
+ pMsg->tid = htonl(pMsg->tid);
+ pMsg->tversion = htons(pMsg->tversion);
+ pMsg->colId = htons(pMsg->colId);
+ pMsg->tagValLen = htonl(pMsg->tagValLen);
+ pMsg->numOfTags = htons(pMsg->numOfTags);
+ pMsg->schemaLen = htonl(pMsg->schemaLen);
+ assert(pMsg->schemaLen == sizeof(STColumn) * pMsg->numOfTags);
+
+ char* d = pMsg->data;
+ for(int32_t i = 0; i < pMsg->numOfTags; ++i) {
+ STColumn* pCol = (STColumn*) d;
+ pCol->colId = htons(pCol->colId);
+ pCol->bytes = htons(pCol->bytes);
+ pCol->offset = 0;
+
+ d += sizeof(STColumn);
+ }
+
+ STable *pTable = tsdbGetTableByUid(pMeta, pMsg->uid);
if (pTable == NULL) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
- if (TABLE_TID(pTable) != htonl(pMsg->tid)) {
+ if (TABLE_TID(pTable) != pMsg->tid) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
@@ -277,10 +306,10 @@ int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
return -1;
}
- if (schemaVersion(tsdbGetTableTagSchema(pTable)) < tversion) {
+ if (schemaVersion(tsdbGetTableTagSchema(pTable)) < pMsg->tversion) {
tsdbDebug("vgId:%d server tag version %d is older than client tag version %d, try to config", REPO_ID(pRepo),
- schemaVersion(tsdbGetTableTagSchema(pTable)), tversion);
- void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, htonl(pMsg->tid));
+ schemaVersion(tsdbGetTableTagSchema(pTable)), pMsg->tversion);
+ void *msg = (*pRepo->appH.configFunc)(pRepo->config.tsdbId, pMsg->tid);
if (msg == NULL) return -1;
// Deal with error her
@@ -299,19 +328,24 @@ int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg) {
STSchema *pTagSchema = tsdbGetTableTagSchema(pTable);
- if (schemaVersion(pTagSchema) > tversion) {
+ if (schemaVersion(pTagSchema) > pMsg->tversion) {
tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
"version %d",
- REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), tversion, schemaVersion(pTable->tagSchema));
+ REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
return TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
}
- if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
+ if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
tsdbRemoveTableFromIndex(pMeta, pTable);
}
// TODO: remove table from index if it is the first column of tag
- tdSetKVRowDataOfCol(&pTable->tagVal, htons(pMsg->colId), htons(pMsg->type), pMsg->data);
- if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == htons(pMsg->colId)) {
+
+ // TODO: convert the tag schema from client, and then extract the type and bytes from schema according to colId
+ STColumn* res = bsearch(&pMsg->colId, pMsg->data, pMsg->numOfTags, sizeof(STColumn), colIdCompar);
+ assert(res != NULL);
+
+ tdSetKVRowDataOfCol(&pTable->tagVal, pMsg->colId, res->type, pMsg->data + pMsg->schemaLen);
+ if (schemaColAt(pTagSchema, DEFAULT_TAG_INDEX_COLUMN)->colId == pMsg->colId) {
tsdbAddTableIntoIndex(pMeta, pTable);
}
return TSDB_CODE_SUCCESS;
@@ -541,7 +575,7 @@ void tsdbRefTable(STable *pTable) {
void tsdbUnRefTable(STable *pTable) {
int32_t ref = T_REF_DEC(pTable);
- tsdbDebug("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
+ tsdbTrace("unref table uid:%"PRIu64", tid:%d, refCount:%d", TABLE_UID(pTable), TABLE_TID(pTable), ref);
if (ref == 0) {
// tsdbDebug("destory table name:%s uid:%"PRIu64", tid:%d", TABLE_CHAR_NAME(pTable), TABLE_UID(pTable), TABLE_TID(pTable));
diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c
index 934fa8e73373d365d2d567379e121f90e4db88ff..eab9a5e0565a301d5f6d2f30a16c876c1acb781f 100644
--- a/src/tsdb/src/tsdbRWHelper.c
+++ b/src/tsdb/src/tsdbRWHelper.c
@@ -19,6 +19,7 @@
#include "tcoding.h"
#include "tscompression.h"
#include "tsdbMain.h"
+#include "tfile.h"
#define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM))
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 19a022e0a7acb92643e1282970d4d625da33a75f..6a9c8e1ff668ed9792b10f427e8b5d93dfded847 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -74,9 +74,6 @@ typedef struct STableCheckInfo {
SDataCols* pDataCols;
int32_t chosen; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
- SMemTable* mem; // in-mem buffer, hold the ref count
- SMemTable* imem; // imem buffer, hold the ref count to avoid release
-
SSkipListIterator* iter; // mem buffer skip list iterator
SSkipListIterator* iiter; // imem buffer skip list iterator
} STableCheckInfo;
@@ -113,6 +110,8 @@ typedef struct STsdbQueryHandle {
SFileGroupIter fileIter;
SRWHelper rhelper;
STableBlockInfo* pDataBlockInfo;
+ SMemTable* mem; // mem-table
+ SMemTable* imem; // imem-table, acquired from snapshot
SDataBlockLoadInfo dataBlockLoadInfo; /* record current block load information */
SLoadCompBlockInfo compBlockLoadInfo; /* record current compblock information in SQuery */
@@ -138,9 +137,6 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
}
TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
- // todo 1. filter not exist table
- // todo 2. add the reference count for each table that is involved in query
-
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
pQueryHandle->order = pCond->order;
pQueryHandle->window = pCond->twindow;
@@ -154,6 +150,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
+ tsdbTakeMemSnapshot(pQueryHandle->pTsdb, &pQueryHandle->mem, &pQueryHandle->imem);
size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
@@ -252,22 +249,22 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
pCheckInfo->initBuf = true;
int32_t order = pHandle->order;
- tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem);
+// tsdbTakeMemSnapshot(pHandle->pTsdb, &pCheckInfo->mem, &pCheckInfo->imem);
// no data in buffer, abort
- if (pCheckInfo->mem == NULL && pCheckInfo->imem == NULL) {
+ if (pHandle->mem == NULL && pHandle->imem == NULL) {
return false;
}
assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL);
- if (pCheckInfo->mem && pCheckInfo->mem->tData[pCheckInfo->tableId.tid] != NULL) {
- pCheckInfo->iter = tSkipListCreateIterFromVal(pCheckInfo->mem->tData[pCheckInfo->tableId.tid]->pData,
+ if (pHandle->mem && pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
+ pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
- if (pCheckInfo->imem && pCheckInfo->imem->tData[pCheckInfo->tableId.tid] != NULL) {
- pCheckInfo->iiter = tSkipListCreateIterFromVal(pCheckInfo->imem->tData[pCheckInfo->tableId.tid]->pData,
+ if (pHandle->imem && pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
+ pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
@@ -685,6 +682,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
// query ended in current block
if (pQueryHandle->window.ekey < pBlock->keyLast || pCheckInfo->lastKey > pBlock->keyFirst) {
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
+ taosArrayDestroy(sa);
return false;
}
@@ -1504,6 +1502,7 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
pQueryHandle->window = pQueryHandle->cur.win;
pQueryHandle->cur.rows = 1;
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
+ taosArrayDestroy(sa);
return true;
} else {
STsdbQueryHandle* pSecQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
@@ -1518,7 +1517,8 @@ bool tsdbNextDataBlock(TsdbQueryHandleT* pHandle) {
pSecQueryHandle->outputCapacity = ((STsdbRepo*)pSecQueryHandle->pTsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pSecQueryHandle->rhelper, (STsdbRepo*) pSecQueryHandle->pTsdb);
-
+ tsdbTakeMemSnapshot(pSecQueryHandle->pTsdb, &pSecQueryHandle->mem, &pSecQueryHandle->imem);
+
// allocate buffer in order to load data blocks from file
int32_t numOfCols = QH_GET_NUM_OF_COLS(pQueryHandle);
@@ -2083,26 +2083,15 @@ bool indexedNodeFilterFp(const void* pNode, void* param) {
STable* pTable = *(STable**)(SL_GET_NODE_DATA((SSkipListNode*)pNode));
char* val = NULL;
- int8_t type = pInfo->sch.type;
if (pInfo->colIndex == TSDB_TBNAME_COLUMN_INDEX) {
val = (char*) pTable->name;
- type = TSDB_DATA_TYPE_BINARY;
} else {
val = tdGetKVRowValOfCol(pTable->tagVal, pInfo->sch.colId);
}
//todo :the val is possible to be null, so check it out carefully
- int32_t ret = 0;
- if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
- if (pInfo->optr == TSDB_RELATION_IN) {
- ret = pInfo->compare(val, pInfo->q);
- } else {
- ret = pInfo->compare(val, pInfo->q);
- }
- } else {
- ret = pInfo->compare(val, pInfo->q);
- }
+ int32_t ret = pInfo->compare(val, pInfo->q);
switch (pInfo->optr) {
case TSDB_RELATION_EQUAL: {
@@ -2271,7 +2260,9 @@ int32_t tsdbGetOneTableGroup(TSDB_REPO_T* tsdb, uint64_t uid, STableGroupInfo* p
}
int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STableGroupInfo* pGroupInfo) {
- if (tsdbRLockRepoMeta(tsdb) < 0) goto _error;
+ if (tsdbRLockRepoMeta(tsdb) < 0) {
+ return terrno;
+ }
assert(pTableIdList != NULL);
size_t size = taosArrayGetSize(pTableIdList);
@@ -2297,15 +2288,15 @@ int32_t tsdbGetTableGroupFromIdList(TSDB_REPO_T* tsdb, SArray* pTableIdList, STa
taosArrayPush(group, &pTable);
}
- if (tsdbUnlockRepoMeta(tsdb) < 0) goto _error;
+ if (tsdbUnlockRepoMeta(tsdb) < 0) {
+ taosArrayDestroy(group);
+ return terrno;
+ }
pGroupInfo->numOfTables = i;
taosArrayPush(pGroupInfo->pGroupList, &group);
return TSDB_CODE_SUCCESS;
-
- _error:
- return terrno;
}
void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
@@ -2319,9 +2310,6 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
STableCheckInfo* pTableCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
tSkipListDestroyIter(pTableCheckInfo->iter);
- tsdbUnRefMemTable(pQueryHandle->pTsdb, pTableCheckInfo->mem);
- tsdbUnRefMemTable(pQueryHandle->pTsdb, pTableCheckInfo->imem);
-
if (pTableCheckInfo->pDataCols != NULL) {
tfree(pTableCheckInfo->pDataCols->buf);
}
@@ -2341,9 +2329,12 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
taosArrayDestroy(pQueryHandle->pColumns);
tfree(pQueryHandle->pDataBlockInfo);
tfree(pQueryHandle->statis);
-
+
+ // TODO: check and handle the error codes returned by tsdbUnRefMemTable
+ tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->mem);
+ tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->imem);
+
tsdbDestroyHelper(&pQueryHandle->rhelper);
-
tfree(pQueryHandle);
}
diff --git a/src/util/inc/tcache.h b/src/util/inc/tcache.h
index 17b38238316bf74d67824b119b774a8cd5804860..b026ad43863995cadb2bd15067b145f660a2f0cd 100644
--- a/src/util/inc/tcache.h
+++ b/src/util/inc/tcache.h
@@ -24,6 +24,8 @@ extern "C" {
#include "tref.h"
#include "hash.h"
+typedef void (*__cache_freeres_fn_t)(void*);
+
typedef struct SCacheStatis {
int64_t missCount;
int64_t hitCount;
@@ -34,14 +36,15 @@ typedef struct SCacheStatis {
typedef struct SCacheDataNode {
uint64_t addedTime; // the added time when this element is added or updated into cache
- uint64_t expiredTime; // expiredTime expiredTime when this element should be remove from cache
+ uint64_t lifespan; // expiredTime expiredTime when this element should be remove from cache
uint64_t signature;
uint32_t size; // allocated size for current SCacheDataNode
- uint16_t keySize: 15;
- bool inTrashCan: 1;// denote if it is in trash or not
T_REF_DECLARE()
- char *key;
- char data[];
+ uint16_t keySize: 15; // max key size: 32kb
+ bool inTrashCan: 1;// denote if it is in trash or not
+ int32_t extendFactor; // number of life span extend
+ char *key;
+ char data[];
} SCacheDataNode;
typedef struct STrashElem {
@@ -62,29 +65,33 @@ typedef struct {
int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included.
int64_t refreshTime;
STrashElem * pTrash;
- void * tmrCtrl;
- void * pTimer;
+ char* name;
+// void * tmrCtrl;
+// void * pTimer;
SCacheStatis statistics;
SHashObj * pHashTable;
- _hash_free_fn_t freeFp;
+ __cache_freeres_fn_t freeFp;
uint32_t numOfElemsInTrash; // number of element in trash
uint8_t deleting; // set the deleting flag to stop refreshing ASAP.
pthread_t refreshWorker;
-
+ bool extendLifespan; // auto extend life span when one item is accessed.
#if defined(LINUX)
pthread_rwlock_t lock;
#else
- pthread_mutex_t lock;
+ pthread_mutex_t lock;
#endif
} SCacheObj;
/**
* initialize the cache object
- * @param refreshTime refresh operation interval time, the maximum survival time when one element is expired and
- * not referenced by other objects
+ * @param keyType key type
+ * @param refreshTimeInSeconds refresh operation interval time, the maximum survival time when one element is expired
+ * and not referenced by other objects
+ * @param extendLifespan auto extend lifespan, if accessed
+ * @param fn free resource callback function
* @return
*/
-SCacheObj *taosCacheInit(int64_t refreshTimeInSeconds);
+SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName);
/**
* initialize the cache object and set the free object callback function
@@ -92,7 +99,7 @@ SCacheObj *taosCacheInit(int64_t refreshTimeInSeconds);
* @param freeCb
* @return
*/
-SCacheObj *taosCacheInitWithCb(int64_t refreshTimeInSeconds, void (*freeCb)(void *data));
+SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char *cacheName);
/**
* add data into cache
@@ -104,7 +111,7 @@ SCacheObj *taosCacheInitWithCb(int64_t refreshTimeInSeconds, void (*freeCb)(void
* @param keepTime survival time in second
* @return cached element
*/
-void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, size_t dataSize, int keepTimeInSeconds);
+void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int keepTimeInSeconds);
/**
* get data from cache
@@ -112,22 +119,23 @@ void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, siz
* @param key key
* @return cached data or NULL
*/
-void *taosCacheAcquireByName(SCacheObj *pCacheObj, const char *key);
+void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen);
/**
* update the expire time of data in cache
* @param pCacheObj cache object
* @param key key
+ * @param keyLen keyLen
* @param expireTime new expire time of data
* @return
*/
-void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, uint64_t expireTime);
+void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime);
/**
* Add one reference count for the exist data, and assign this data for a new owner.
* The new owner needs to invoke the taosCacheRelease when it does not need this data anymore.
- * This procedure is a faster version of taosCacheAcquireByName function, which avoids the sideeffect of the problem of
- * the data is moved to trash, and taosCacheAcquireByName will fail to retrieve it again.
+ * This procedure is a faster version of taosCacheAcquireByKey function, which avoids the sideeffect of the problem of
+ * the data is moved to trash, and taosCacheAcquireByKey will fail to retrieve it again.
*
* @param handle
* @param data
@@ -148,16 +156,16 @@ void *taosCacheTransfer(SCacheObj *pCacheObj, void **data);
* if it is referenced by other object, it will be remain in cache
* @param handle cache object
* @param data not the key, actually referenced data
- * @param _remove force model, reduce the ref count and move the data into
- * pTrash
+ * @param _remove force model, reduce the ref count and move the data into pTrash
*/
void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove);
/**
* move all data node into trash, clear node in trash can if it is not referenced by any clients
* @param handle
+ * @param _remove whether to force-remove the data even if its refcount is greater than 0
*/
-void taosCacheEmpty(SCacheObj *pCacheObj);
+void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove);
/**
* release all allocated memory and destroy the cache object.
diff --git a/src/util/inc/tfile.h b/src/util/inc/tfile.h
new file mode 100644
index 0000000000000000000000000000000000000000..5bddc7626618f548d94b1ea02f8d233e68c4d156
--- /dev/null
+++ b/src/util/inc/tfile.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef TDENGINE_TFILE_H
+#define TDENGINE_TFILE_H
+
+#ifdef TAOS_RANDOM_FILE_FAIL
+
+ssize_t taos_tread(int fd, void *buf, size_t count);
+ssize_t taos_twrite(int fd, void *buf, size_t count);
+off_t taos_lseek(int fd, off_t offset, int whence);
+
+#define tread(fd, buf, count) taos_tread(fd, buf, count)
+#define twrite(fd, buf, count) taos_twrite(fd, buf, count)
+#define lseek(fd, offset, whence) taos_lseek(fd, offset, whence)
+
+#endif // TAOS_RANDOM_FILE_FAIL
+
+#endif // TDENGINE_TFILE_H
diff --git a/src/util/src/tcache.c b/src/util/src/tcache.c
index 720741f0893086d4c9c3fb59bee0adfe4dc3f534..d546970868a5cb0ddf69bc54eb0c92c471eeb54d 100644
--- a/src/util/src/tcache.c
+++ b/src/util/src/tcache.c
@@ -118,8 +118,9 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
-
- uDebug("key:%s, is removed from cache, total:%" PRId64 " size:%d bytes", pNode->key, pCacheObj->totalSize, size);
+
+ uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes",
+ pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode);
}
@@ -167,7 +168,7 @@ static SCacheDataNode *taosUpdateCacheImpl(SCacheObj *pCacheObj, SCacheDataNode
// update the timestamp information for updated key/value
pNewNode->addedTime = taosGetTimestampMs();
- pNewNode->expiredTime = pNewNode->addedTime + duration;
+ pNewNode->lifespan = duration;
T_REF_INC(pNewNode);
@@ -224,8 +225,8 @@ static void doCleanupDataCache(SCacheObj *pCacheObj);
*/
static void* taosCacheRefresh(void *handle);
-SCacheObj *taosCacheInitWithCb(int64_t refreshTime, void (*freeCb)(void *data)) {
- if (refreshTime <= 0) {
+SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
+ if (refreshTimeInSeconds <= 0) {
return NULL;
}
@@ -235,7 +236,8 @@ SCacheObj *taosCacheInitWithCb(int64_t refreshTime, void (*freeCb)(void *data))
return NULL;
}
- pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false);
+ pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false);
+ pCacheObj->name = strdup(cacheName);
if (pCacheObj->pHashTable == NULL) {
free(pCacheObj);
uError("failed to allocate memory, reason:%s", strerror(errno));
@@ -243,10 +245,9 @@ SCacheObj *taosCacheInitWithCb(int64_t refreshTime, void (*freeCb)(void *data))
}
// set free cache node callback function for hash table
- // taosHashSetFreecb(pCacheObj->pHashTable, taosFreeNode);
-
- pCacheObj->freeFp = freeCb;
- pCacheObj->refreshTime = refreshTime * 1000;
+ pCacheObj->freeFp = fn;
+ pCacheObj->refreshTime = refreshTimeInSeconds * 1000;
+ pCacheObj->extendLifespan = extendLifespan;
if (__cache_lock_init(pCacheObj) != 0) {
taosHashCleanup(pCacheObj->pHashTable);
@@ -256,7 +257,7 @@ SCacheObj *taosCacheInitWithCb(int64_t refreshTime, void (*freeCb)(void *data))
return NULL;
}
- pthread_attr_t thattr;
+ pthread_attr_t thattr = {{0}};
pthread_attr_init(&thattr);
pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
@@ -266,19 +267,13 @@ SCacheObj *taosCacheInitWithCb(int64_t refreshTime, void (*freeCb)(void *data))
return pCacheObj;
}
-SCacheObj *taosCacheInit(int64_t refreshTime) {
- return taosCacheInitWithCb(refreshTime, NULL);
-}
-
-void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, size_t dataSize, int duration) {
+void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) {
SCacheDataNode *pNode;
if (pCacheObj == NULL || pCacheObj->pHashTable == NULL) {
return NULL;
}
-
- size_t keyLen = strlen(key);
-
+
__cache_wr_lock(pCacheObj);
SCacheDataNode **pt = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
SCacheDataNode * pOld = (pt != NULL) ? (*pt) : NULL;
@@ -287,75 +282,86 @@ void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, siz
pNode = taosAddToCacheImpl(pCacheObj, key, keyLen, pData, dataSize, duration * 1000L);
if (NULL != pNode) {
pCacheObj->totalSize += pNode->size;
-
- uDebug("key:%s, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%" PRId64 ", size:%" PRId64 " bytes",
- key, pNode, pNode->addedTime, pNode->expiredTime, pCacheObj->totalSize, dataSize);
+
+ uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64
+ "bytes size:%" PRId64 "bytes",
+ pCacheObj->name, key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime),
+ (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize);
} else {
- uError("key:%s, failed to added into cache, out of memory", key);
+ uError("cache:%s, key:%p, failed to added into cache, out of memory", pCacheObj->name, key);
}
} else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
- uDebug("key:%s, %p exist in cache, updated", key, pNode);
+ uDebug("cache:%s, key:%p, %p exist in cache, updated", pCacheObj->name, key, pNode->data);
}
-
+
__cache_unlock(pCacheObj);
-
+
return (pNode != NULL) ? pNode->data : NULL;
}
-void *taosCacheAcquireByName(SCacheObj *pCacheObj, const char *key) {
+void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen) {
if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) {
return NULL;
}
-
- uint32_t keyLen = (uint32_t)strlen(key);
-
+
__cache_rd_lock(pCacheObj);
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
+
+ int32_t ref = 0;
if (ptNode != NULL) {
- T_REF_INC(*ptNode);
+ ref = T_REF_INC(*ptNode);
+
+ // if the remaining life span is less than (*ptNode)->lifespan, extend it by one lifespan
+ if (pCacheObj->extendLifespan) {
+ int64_t now = taosGetTimestampMs();
+
+ if ((now - (*ptNode)->addedTime) < (*ptNode)->lifespan * (*ptNode)->extendFactor) {
+ (*ptNode)->extendFactor += 1;
+ uDebug("key:%p extend life time to %"PRId64, key, (*ptNode)->lifespan * (*ptNode)->extendFactor + (*ptNode)->addedTime);
+ }
+ }
}
-
__cache_unlock(pCacheObj);
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
- uDebug("key:%s, is retrieved from cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
+ uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, (*ptNode)->data, ref);
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
- uDebug("key:%s, not in cache, retrieved failed", key);
+ uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
return (ptNode != NULL) ? (*ptNode)->data : NULL;
}
-void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, uint64_t expireTime) {
+void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t keyLen, uint64_t expireTime) {
if (pCacheObj == NULL || taosHashGetSize(pCacheObj->pHashTable) == 0) {
return NULL;
}
-
- uint32_t keyLen = (uint32_t)strlen(key);
-
+
__cache_rd_lock(pCacheObj);
SCacheDataNode **ptNode = (SCacheDataNode **)taosHashGet(pCacheObj->pHashTable, key, keyLen);
if (ptNode != NULL) {
T_REF_INC(*ptNode);
- (*ptNode)->expiredTime = expireTime;
+ (*ptNode)->extendFactor += 1;
+// (*ptNode)->lifespan = expireTime;
}
-
+
__cache_unlock(pCacheObj);
-
+
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
- uDebug("key:%s, expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
+ uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key,
+ (*ptNode)->data, T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
- uDebug("key:%s, not in cache, retrieved failed", key);
+ uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
}
-
+
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
return (ptNode != NULL) ? (*ptNode)->data : NULL;
}
@@ -370,10 +376,21 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
uError("key: %p the data from cache is invalid", ptNode);
return NULL;
}
-
+
int32_t ref = T_REF_INC(ptNode);
- uDebug("%p acquired by data in cache, refcnt:%d", ptNode, ref)
-
+ uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref);
+
+ // if the remaining life span is less than ptNode->lifespan, extend it by one lifespan
+ if (pCacheObj->extendLifespan) {
+ int64_t now = taosGetTimestampMs();
+
+ if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) {
+ ptNode->extendFactor += 1;
+ uDebug("cache:%s, %p extend life time to %" PRId64, pCacheObj->name, ptNode->data,
+ ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime);
+ }
+ }
+
// the data if referenced by at least one object, so the reference count must be greater than the value of 2.
assert(ref >= 2);
return data;
@@ -408,26 +425,32 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
size_t offset = offsetof(SCacheDataNode, data);
SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset);
-
if (pNode->signature != (uint64_t)pNode) {
- uError("%p release invalid cache data", pNode);
+ uError("%p, release invalid cache data", pNode);
return;
}
-
+
*data = NULL;
- int32_t ref = T_REF_DEC(pNode);
- uDebug("key:%s, is released, %p refcnt:%d", pNode->key, pNode, ref);
-
- if (_remove) {
+ int16_t ref = T_REF_DEC(pNode);
+ uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
+
+ if (_remove && (!pNode->inTrashCan)) {
__cache_wr_lock(pCacheObj);
- // pNode may be released immediately by other thread after the reference count of pNode is set to 0,
- // So we need to lock it in the first place.
- taosCacheMoveToTrash(pCacheObj, pNode);
+
+ if (T_REF_VAL_GET(pNode) == 0) {
+ // remove directly, if not referenced by other users
+ taosCacheReleaseNode(pCacheObj, pNode);
+ } else {
+ // pNode may be released immediately by other thread after the reference count of pNode is set to 0,
+ // So we need to lock it in the first place.
+ taosCacheMoveToTrash(pCacheObj, pNode);
+ }
+
__cache_unlock(pCacheObj);
}
}
-void taosCacheEmpty(SCacheObj *pCacheObj) {
+void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
__cache_wr_lock(pCacheObj);
@@ -437,12 +460,16 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
}
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
- taosCacheMoveToTrash(pCacheObj, pNode);
+ if (T_REF_VAL_GET(pNode) == 0 || _remove) {
+ taosCacheReleaseNode(pCacheObj, pNode);
+ } else {
+ taosCacheMoveToTrash(pCacheObj, pNode);
+ }
}
__cache_unlock(pCacheObj);
taosHashDestroyIter(pIter);
- taosTrashCanEmpty(pCacheObj, false);
+ taosTrashCanEmpty(pCacheObj, _remove);
}
void taosCacheCleanup(SCacheObj *pCacheObj) {
@@ -453,6 +480,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1;
pthread_join(pCacheObj->refreshWorker, NULL);
+ uInfo("cache:%s will be cleaned up", pCacheObj->name);
doCleanupDataCache(pCacheObj);
}
@@ -473,11 +501,11 @@ SCacheDataNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *
memcpy(pNewNode->key, key, keyLen);
- pNewNode->addedTime = (uint64_t)taosGetTimestampMs();
- pNewNode->expiredTime = pNewNode->addedTime + duration;
-
- pNewNode->signature = (uint64_t)pNewNode;
- pNewNode->size = (uint32_t)totalSize;
+ pNewNode->addedTime = (uint64_t)taosGetTimestampMs();
+ pNewNode->lifespan = duration;
+ pNewNode->extendFactor = 1;
+ pNewNode->signature = (uint64_t)pNewNode;
+ pNewNode->size = (uint32_t)totalSize;
return pNewNode;
}
@@ -501,7 +529,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pNode->inTrashCan = true;
pCacheObj->numOfElemsInTrash++;
- uDebug("key:%s, %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
+ uDebug("key:%p, %p move to trash, numOfElem in trash:%d", pNode->key, pNode->data, pCacheObj->numOfElemsInTrash);
}
void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
@@ -522,7 +550,10 @@ void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
}
pElem->pData->signature = 0;
- if (pCacheObj->freeFp) pCacheObj->freeFp(pElem->pData->data);
+ if (pCacheObj->freeFp) {
+ pCacheObj->freeFp(pElem->pData->data);
+ }
+
free(pElem->pData);
free(pElem);
}
@@ -549,7 +580,7 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
}
if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
- uDebug("key:%s, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
+ uDebug("key:%p, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData->data,
pCacheObj->numOfElemsInTrash - 1);
STrashElem *p = pElem;
@@ -569,21 +600,25 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
- // if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
- if (T_REF_VAL_GET(pNode) <= 0) {
+
+ int32_t c = T_REF_VAL_GET(pNode);
+ if (c <= 0) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
- uDebug("key:%s, will not remove from cache, refcnt:%d", pNode->key, T_REF_VAL_GET(pNode));
+ uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key,
+ pNode->data, T_REF_VAL_GET(pNode));
}
}
taosHashDestroyIter(pIter);
- taosHashCleanup(pCacheObj->pHashTable);
+ // TODO: possible memory leak if objects with refcount greater than 0 remain in the hash table
+ taosHashCleanup(pCacheObj->pHashTable);
__cache_unlock(pCacheObj);
taosTrashCanEmpty(pCacheObj, true);
__cache_lock_destroy(pCacheObj);
-
+
+ tfree(pCacheObj->name);
memset(pCacheObj, 0, sizeof(SCacheObj));
free(pCacheObj);
}
@@ -613,27 +648,32 @@ void* taosCacheRefresh(void *handle) {
// reset the count value
count = 0;
- size_t num = taosHashGetSize(pCacheObj->pHashTable);
- if (num == 0) {
+ size_t elemInHash = taosHashGetSize(pCacheObj->pHashTable);
+ if (elemInHash + pCacheObj->numOfElemsInTrash == 0) {
continue;
}
- uint64_t expiredTime = taosGetTimestampMs();
pCacheObj->statistics.refreshCount++;
- SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
+ // refresh data in hash table
+ if (elemInHash > 0) {
+ int64_t expiredTime = taosGetTimestampMs();
- __cache_wr_lock(pCacheObj);
- while (taosHashIterNext(pIter)) {
- SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
- if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
- taosCacheReleaseNode(pCacheObj, pNode);
+ SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
+
+ __cache_wr_lock(pCacheObj);
+ while (taosHashIterNext(pIter)) {
+ SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
+ if ((pNode->addedTime + pNode->lifespan * pNode->extendFactor) <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
+ taosCacheReleaseNode(pCacheObj, pNode);
+ }
}
- }
- __cache_unlock(pCacheObj);
+ __cache_unlock(pCacheObj);
+
+ taosHashDestroyIter(pIter);
+ }
- taosHashDestroyIter(pIter);
taosTrashCanEmpty(pCacheObj, false);
}
diff --git a/src/util/src/tfile.c b/src/util/src/tfile.c
new file mode 100644
index 0000000000000000000000000000000000000000..eb7a2d5a66b8923a52f40930878ab0aec20262ba
--- /dev/null
+++ b/src/util/src/tfile.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "os.h"
+
+#define RANDOM_FILE_FAIL_FACTOR 5
+
+ssize_t taos_tread(int fd, void *buf, size_t count)
+{
+#ifdef TAOS_RANDOM_FILE_FAIL
+ if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
+ errno = EIO;
+ return -1;
+ }
+#endif
+
+ return tread(fd, buf, count);
+}
+
+ssize_t taos_twrite(int fd, void *buf, size_t count)
+{
+#ifdef TAOS_RANDOM_FILE_FAIL
+ if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
+ errno = EIO;
+ return -1;
+ }
+#endif
+
+ return twrite(fd, buf, count);
+}
+
+off_t taos_lseek(int fd, off_t offset, int whence)
+{
+#ifdef TAOS_RANDOM_FILE_FAIL
+ if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
+ errno = EIO;
+ return -1;
+ }
+#endif
+
+ return lseek(fd, offset, whence);
+}
diff --git a/src/util/src/tkvstore.c b/src/util/src/tkvstore.c
index f33941376ffe2888d1d189882c421f8c69974fb3..2a24a59742efdbcc50b24f4e3216283bf0dbad67 100644
--- a/src/util/src/tkvstore.c
+++ b/src/util/src/tkvstore.c
@@ -27,6 +27,7 @@
#include "tcoding.h"
#include "tkvstore.h"
#include "tulog.h"
+#include "tfile.h"
#define TD_KVSTORE_HEADER_SIZE 512
#define TD_KVSTORE_MAJOR_VERSION 1
@@ -581,4 +582,4 @@ _err:
taosHashDestroyIter(pIter);
tfree(buf);
return -1;
-}
\ No newline at end of file
+}
diff --git a/src/util/src/tmem.c b/src/util/src/tmem.c
index ec5f90990b4d84a92f898656f835c71a976676fc..9c512ad8dc5938626d983d8263271f060abdc78c 100644
--- a/src/util/src/tmem.c
+++ b/src/util/src/tmem.c
@@ -193,7 +193,7 @@ static void* realloc_detect_leak(void* ptr, size_t size, const char* file, uint3
return malloc_detect_leak(size, file, line);
}
- SMemBlock* blk = ((char*)ptr) - sizeof(SMemBlock);
+ SMemBlock* blk = (SMemBlock *)((char*)ptr) - sizeof(SMemBlock);
if (blk->magic != MEMBLK_MAGIC) {
if (fpAllocLog != NULL) {
fprintf(fpAllocLog, "%s:%d: memory is allocated by default allocator.\n", file, line);
diff --git a/src/util/src/ttime.c b/src/util/src/ttime.c
index 7fb9738ec75ba722bc4d69ae872d5b4a0994e6ff..6f67c4a1365abeb175447fcb56237d3cfa44fbee 100644
--- a/src/util/src/ttime.c
+++ b/src/util/src/ttime.c
@@ -374,3 +374,34 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
}
+
+// internal function, when program is paused in debugger,
+// one can call this function from debugger to print a
+// timestamp as human readable string, for example (gdb):
+// p fmtts(1593769722)
+// outputs:
+// 2020-07-03 17:48:42
+// and the parameter can also be a variable.
+const char* fmtts(int64_t ts) {
+ static char buf[32];
+
+ time_t tt;
+ if (ts > -62135625943 && ts < 32503651200) {
+ tt = ts;
+ } else if (ts > -62135625943000 && ts < 32503651200000) {
+ tt = ts / 1000;
+ } else {
+ tt = ts / 1000000;
+ }
+
+ struct tm* ptm = localtime(&tt);
+ size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
+
+ if (ts <= -62135625943000 || ts >= 32503651200000) {
+ sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
+ } else if (ts <= -62135625943 || ts >= 32503651200) {
+ sprintf(buf + pos, ".%03d", (int)(ts % 1000));
+ }
+
+ return buf;
+}
\ No newline at end of file
diff --git a/src/util/src/tutil.c b/src/util/src/tutil.c
index 9c5bffef953b1bd2fc48b8ffc20a60d0e0fa126f..1a74359f47af23d0073f41901f86282e3654e212 100644
--- a/src/util/src/tutil.c
+++ b/src/util/src/tutil.c
@@ -801,6 +801,11 @@ int tmkdir(const char *path, mode_t mode) {
}
void taosMvDir(char* destDir, char *srcDir) {
+ if (0 == tsEnableVnodeBak) {
+ uInfo("vnode backup not enabled");
+ return;
+ }
+
char shellCmd[1024+1] = {0};
//(void)snprintf(shellCmd, 1024, "cp -rf %s %s", srcDir, destDir);
diff --git a/src/util/tests/cacheTest.cpp b/src/util/tests/cacheTest.cpp
index 5762d5700bbb1945490846dcfaaf0e12f0fbaa27..9100b7e7f6c7a7cf9332d6d9f4add30bd2e3c5c3 100644
--- a/src/util/tests/cacheTest.cpp
+++ b/src/util/tests/cacheTest.cpp
@@ -19,12 +19,12 @@ int32_t tsMaxMeterConnections = 200;
// test cache
TEST(testCase, client_cache_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- SCacheObj* tscCacheHandle = taosCacheInit(REFRESH_TIME_IN_SEC);
+ SCacheObj* tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, 0, NULL, "test");
const char* key1 = "test1";
char data1[] = "test11";
- char* cachedObj = (char*) taosCachePut(tscCacheHandle, key1, data1, strlen(data1)+1, 1);
+ char* cachedObj = (char*) taosCachePut(tscCacheHandle, key1, strlen(key1), data1, strlen(data1)+1, 1);
sleep(REFRESH_TIME_IN_SEC+1);
printf("obj is still valid: %s\n", cachedObj);
@@ -33,7 +33,7 @@ TEST(testCase, client_cache_test) {
taosCacheRelease(tscCacheHandle, (void**) &cachedObj, false);
/* the object is cleared by cache clean operation */
- cachedObj = (char*) taosCachePut(tscCacheHandle, key1, data2, strlen(data2)+1, 20);
+ cachedObj = (char*) taosCachePut(tscCacheHandle, key1, strlen(key1), data2, strlen(data2)+1, 20);
printf("after updated: %s\n", cachedObj);
printf("start to remove data from cache\n");
@@ -43,32 +43,32 @@ TEST(testCase, client_cache_test) {
const char* key3 = "test2";
const char* data3 = "kkkkkkk";
- char* cachedObj2 = (char*) taosCachePut(tscCacheHandle, key3, data3, strlen(data3) + 1, 1);
+ char* cachedObj2 = (char*) taosCachePut(tscCacheHandle, key3, strlen(key3), data3, strlen(data3) + 1, 1);
printf("%s\n", cachedObj2);
taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, false);
sleep(3);
- char* d = (char*) taosCacheAcquireByName(tscCacheHandle, key3);
+ char* d = (char*) taosCacheAcquireByKey(tscCacheHandle, key3, strlen(key3));
// assert(d == NULL);
char key5[] = "test5";
char data5[] = "data5kkkkk";
- cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, data5, strlen(data5) + 1, 20);
+ cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data5, strlen(data5) + 1, 20);
const char* data6= "new Data after updated";
taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, false);
- cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, data6, strlen(data6) + 1, 20);
+ cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data6, strlen(data6) + 1, 20);
printf("%s\n", cachedObj2);
taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, true);
const char* data7 = "add call update procedure";
- cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, data7, strlen(data7) + 1, 20);
+ cachedObj2 = (char*) taosCachePut(tscCacheHandle, key5, strlen(key5), data7, strlen(data7) + 1, 20);
printf("%s\n=======================================\n\n", cachedObj2);
- char* cc = (char*) taosCacheAcquireByName(tscCacheHandle, key5);
+ char* cc = (char*) taosCacheAcquireByKey(tscCacheHandle, key5, strlen(key5));
taosCacheRelease(tscCacheHandle, (void**) &cachedObj2, true);
taosCacheRelease(tscCacheHandle, (void**) &cc, false);
@@ -76,7 +76,7 @@ TEST(testCase, client_cache_test) {
const char* data8 = "ttft";
const char* key6 = "key6";
- char* ft = (char*) taosCachePut(tscCacheHandle, key6, data8, strlen(data8), 20);
+ char* ft = (char*) taosCachePut(tscCacheHandle, key6, strlen(key6), data8, strlen(data8), 20);
taosCacheRelease(tscCacheHandle, (void**) &ft, false);
/**
@@ -85,7 +85,7 @@ TEST(testCase, client_cache_test) {
uint64_t startTime = taosGetTimestampUs();
printf("Cache Performance Test\nstart time:%" PRIu64 "\n", startTime);
for(int32_t i=0; i<1000; ++i) {
- char* dd = (char*) taosCacheAcquireByName(tscCacheHandle, key6);
+ char* dd = (char*) taosCacheAcquireByKey(tscCacheHandle, key6, strlen(key6));
if (dd != NULL) {
// printf("get the data\n");
} else {
@@ -105,7 +105,7 @@ TEST(testCase, client_cache_test) {
TEST(testCase, cache_resize_test) {
const int32_t REFRESH_TIME_IN_SEC = 2;
- auto* pCache = taosCacheInit(REFRESH_TIME_IN_SEC);
+ auto* pCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, REFRESH_TIME_IN_SEC, false, NULL, "test");
char key[256] = {0};
char data[1024] = "abcdefghijk";
@@ -116,7 +116,7 @@ TEST(testCase, cache_resize_test) {
for(int32_t i = 0; i < num; ++i) {
int32_t len = sprintf(key, "abc_%7d", i);
- taosCachePut(pCache, key, data, len, 3600);
+ taosCachePut(pCache, key, strlen(key), data, len, 3600);
}
uint64_t endTime = taosGetTimestampUs();
@@ -125,7 +125,7 @@ TEST(testCase, cache_resize_test) {
startTime = taosGetTimestampUs();
for(int32_t i = 0; i < num; ++i) {
int32_t len = sprintf(key, "abc_%7d", i);
- void* k = taosCacheAcquireByName(pCache, key);
+ void* k = taosCacheAcquireByKey(pCache, key, len);
assert(k != 0);
}
endTime = taosGetTimestampUs();
diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h
index e428eae688505f8930e0f981cb02326fea981752..4f22c7784d535b4190bf9b27e76f9ceed684211c 100644
--- a/src/vnode/inc/vnodeInt.h
+++ b/src/vnode/inc/vnodeInt.h
@@ -53,6 +53,7 @@ typedef struct {
STsdbCfg tsdbCfg;
SSyncCfg syncCfg;
SWalCfg walCfg;
+ void *qMgmt;
char *rootDir;
char db[TSDB_DB_NAME_LEN];
} SVnodeObj;
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index f71f6adefbe3923aa877bdbc0ad3a93ffb5a8cc8..0050de33994c48a89abcb107e350d1dec7e2527c 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -15,19 +15,22 @@
#define _DEFAULT_SOURCE
#include "os.h"
+
+#include "tcache.h"
+#include "cJSON.h"
+#include "dnode.h"
#include "hash.h"
#include "taoserror.h"
#include "taosmsg.h"
-#include "tutil.h"
+#include "tglobal.h"
#include "trpc.h"
#include "tsdb.h"
#include "ttime.h"
#include "ttimer.h"
-#include "cJSON.h"
-#include "tglobal.h"
-#include "dnode.h"
+#include "tutil.h"
#include "vnode.h"
#include "vnodeInt.h"
+#include "query.h"
#define TSDB_VNODE_VERSION_CONTENT_LEN 31
@@ -65,6 +68,12 @@ static void vnodeInit() {
}
}
+void vnodeCleanupResources() {
+ taosHashCleanup(tsDnodeVnodesHash);
+ vnodeModuleInit = PTHREAD_ONCE_INIT;
+ tsDnodeVnodesHash = NULL;
+}
+
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
int32_t code;
pthread_once(&vnodeModuleInit, vnodeInit);
@@ -279,6 +288,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
cqStart(pVnode->cq);
+ pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId);
pVnode->events = NULL;
pVnode->status = TAOS_VN_STATUS_READY;
vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
@@ -289,7 +299,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
}
int32_t vnodeStartStream(int32_t vnode) {
- SVnodeObj* pVnode = vnodeAccquireVnode(vnode);
+ SVnodeObj* pVnode = vnodeAcquireVnode(vnode);
if (pVnode != NULL) {
tsdbStartStream(pVnode->tsdb);
vnodeRelease(pVnode);
@@ -317,10 +327,13 @@ void vnodeRelease(void *pVnodeRaw) {
assert(refCount >= 0);
if (refCount > 0) {
- vTrace("vgId:%d, release vnode, refCount:%d", vgId, refCount);
+ vDebug("vgId:%d, release vnode, refCount:%d", vgId, refCount);
return;
}
+ qCleanupQueryMgmt(pVnode->qMgmt);
+ pVnode->qMgmt = NULL;
+
if (pVnode->tsdb)
tsdbCloseRepo(pVnode->tsdb, 1);
pVnode->tsdb = NULL;
@@ -355,12 +368,6 @@ void vnodeRelease(void *pVnodeRaw) {
int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1);
vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count);
-
- if (count <= 0) {
- taosHashCleanup(tsDnodeVnodesHash);
- vnodeModuleInit = PTHREAD_ONCE_INIT;
- tsDnodeVnodesHash = NULL;
- }
}
void *vnodeGetVnode(int32_t vgId) {
@@ -376,22 +383,31 @@ void *vnodeGetVnode(int32_t vgId) {
return *ppVnode;
}
-void *vnodeAccquireVnode(int32_t vgId) {
+void *vnodeAcquireVnode(int32_t vgId) {
SVnodeObj *pVnode = vnodeGetVnode(vgId);
if (pVnode == NULL) return pVnode;
atomic_add_fetch_32(&pVnode->refCount, 1);
- vTrace("vgId:%d, get vnode, refCount:%d", pVnode->vgId, pVnode->refCount);
+ vDebug("vgId:%d, get vnode, refCount:%d", pVnode->vgId, pVnode->refCount);
return pVnode;
}
+void *vnodeAcquireRqueue(void *param) {
+ SVnodeObj *pVnode = param;
+ if (pVnode == NULL) return NULL;
+
+ atomic_add_fetch_32(&pVnode->refCount, 1);
+ vDebug("vgId:%d, get vnode rqueue, refCount:%d", pVnode->vgId, pVnode->refCount);
+ return ((SVnodeObj *)pVnode)->rqueue;
+}
+
void *vnodeGetRqueue(void *pVnode) {
return ((SVnodeObj *)pVnode)->rqueue;
}
void *vnodeGetWqueue(int32_t vgId) {
- SVnodeObj *pVnode = vnodeAccquireVnode(vgId);
+ SVnodeObj *pVnode = vnodeAcquireVnode(vgId);
if (pVnode == NULL) return NULL;
return pVnode->wqueue;
}
@@ -417,6 +433,28 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
pLoad->replica = pVnode->syncCfg.replica;
}
+int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
+ if (tsDnodeVnodesHash == NULL) return TSDB_CODE_SUCCESS;
+
+ SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
+ while (taosHashIterNext(pIter)) {
+ SVnodeObj **pVnode = taosHashIterGet(pIter);
+ if (pVnode == NULL) continue;
+ if (*pVnode == NULL) continue;
+
+ (*numOfVnodes)++;
+ if (*numOfVnodes >= TSDB_MAX_VNODES) {
+ vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES);
+ continue;
+ } else {
+ vnodeList[*numOfVnodes - 1] = (*pVnode)->vgId;
+ }
+ }
+
+ taosHashDestroyIter(pIter);
+ return TSDB_CODE_SUCCESS;
+}
+
void vnodeBuildStatusMsg(void *param) {
SDMStatusMsg *pStatus = param;
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
@@ -435,7 +473,7 @@ void vnodeBuildStatusMsg(void *param) {
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
pAccess[i].vgId = htonl(pAccess[i].vgId);
- SVnodeObj *pVnode = vnodeAccquireVnode(pAccess[i].vgId);
+ SVnodeObj *pVnode = vnodeAcquireVnode(pAccess[i].vgId);
if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
@@ -459,6 +497,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount);
// release local resources only after cutting off outside connections
+ qSetQueryMgmtClosed(pVnode->qMgmt);
vnodeRelease(pVnode);
}
@@ -848,12 +887,12 @@ static int32_t vnodeReadVersion(SVnodeObj *pVnode) {
goto PARSE_OVER;
}
- cJSON *version = cJSON_GetObjectItem(root, "version");
- if (!version || version->type != cJSON_Number) {
+ cJSON *ver = cJSON_GetObjectItem(root, "version");
+ if (!ver || ver->type != cJSON_Number) {
vError("vgId:%d, failed to read vnode version, version not found", pVnode->vgId);
goto PARSE_OVER;
}
- pVnode->version = version->valueint;
+ pVnode->version = ver->valueint;
terrno = TSDB_CODE_SUCCESS;
vInfo("vgId:%d, read vnode version successfully, version:%" PRId64, pVnode->vgId, pVnode->version);
diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c
index 0c08c77e329828396e7c6760814f15e247c8977d..354caf2af5d95be8d3d932ce5541a0f625e8f185 100644
--- a/src/vnode/src/vnodeRead.c
+++ b/src/vnode/src/vnodeRead.c
@@ -14,21 +14,23 @@
*/
#define _DEFAULT_SOURCE
+#include
#include "os.h"
-#include "taosmsg.h"
+
+#include "tglobal.h"
#include "taoserror.h"
-#include "tqueue.h"
+#include "taosmsg.h"
+#include "tcache.h"
+#include "query.h"
#include "trpc.h"
#include "tsdb.h"
-#include "twal.h"
-#include "tdataformat.h"
#include "vnode.h"
#include "vnodeInt.h"
-#include "query.h"
static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SReadMsg *pReadMsg);
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg);
static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg);
+static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId);
void vnodeInitReadFp(void) {
vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg;
@@ -58,19 +60,6 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
return (*vnodeProcessReadMsgFp[msgType])(pVnode, pReadMsg);
}
-// notify connection(handle) that current qhandle is created, if current connection from
-// client is broken, the query needs to be killed immediately.
-static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
- SRetrieveTableMsg* killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
- killQueryMsg->qhandle = htobe64((uint64_t) qhandle);
- killQueryMsg->free = htons(1);
- killQueryMsg->header.vgId = htonl(vgId);
- killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));
-
- vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle);
- return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
-}
-
static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
void * pCont = pReadMsg->pCont;
int32_t contLen = pReadMsg->contLen;
@@ -85,59 +74,82 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
killQueryMsg->free = htons(killQueryMsg->free);
killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
- vWarn("QInfo:%p connection %p broken, kill query", (void*)killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
+ void* handle = NULL;
+ if ((void**) killQueryMsg->qhandle != NULL) {
+ handle = *(void**) killQueryMsg->qhandle;
+ }
+
+ vWarn("QInfo:%p connection %p broken, kill query", handle, pReadMsg->rpcMsg.handle);
assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
- // this message arrived here by means of the query message, so release the vnode is necessary
- qKillQuery((qinfo_t) killQueryMsg->qhandle, vnodeRelease, pVnode);
- vnodeRelease(pVnode);
+ void** qhandle = qAcquireQInfo(pVnode->qMgmt, (void**) killQueryMsg->qhandle);
+ if (qhandle == NULL || *qhandle == NULL) {
+ vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
+ } else {
+ assert(qhandle == (void**) killQueryMsg->qhandle);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true);
+ }
- return TSDB_CODE_TSC_QUERY_CANCELLED; // todo change the error code
+ return TSDB_CODE_TSC_QUERY_CANCELLED;
}
int32_t code = TSDB_CODE_SUCCESS;
qinfo_t pQInfo = NULL;
+ void** handle = NULL;
if (contLen != 0) {
- code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, &pQInfo);
+ code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo);
SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp));
- pRsp->qhandle = htobe64((uint64_t) (pQInfo));
- pRsp->code = code;
+ pRsp->code = code;
+ pRsp->qhandle = 0;
pRet->len = sizeof(SQueryTableRsp);
pRet->rsp = pRsp;
+ int32_t vgId = pVnode->vgId;
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
- if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId) != TSDB_CODE_SUCCESS) {
- vError("vgId:%d, QInfo:%p, dnode query discarded since link is broken, %p", pVnode->vgId, pQInfo,
- pReadMsg->rpcMsg.handle);
- pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ // add lock here
+ handle = qRegisterQInfo(pVnode->qMgmt, pQInfo);
+ if (handle == NULL) { // failed to register qhandle
+ pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
+
+ qKillQuery(pQInfo);
+ qKillQuery(pQInfo);
+ } else {
+ assert(*handle == pQInfo);
+ pRsp->qhandle = htobe64((uint64_t) (handle));
+ }
- // NOTE: there two refcount, needs to kill twice, todo refactor
- qKillQuery(pQInfo, vnodeRelease, pVnode);
- qKillQuery(pQInfo, vnodeRelease, pVnode);
+ if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
+ vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
+ pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
+ // NOTE: there two refcount, needs to kill twice
+ // query has not been put into qhandle pool, kill it directly.
+ qKillQuery(pQInfo);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
return pRsp->code;
}
-
- vTrace("vgId:%d, QInfo:%p, dnode query msg disposed", pVnode->vgId, pQInfo);
} else {
assert(pQInfo == NULL);
- vnodeRelease(pVnode);
}
- vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", pVnode->vgId, pQInfo);
+ vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
} else {
assert(pCont != NULL);
- pQInfo = pCont;
+ pQInfo = *(void**)(pCont);
+ handle = pCont;
code = TSDB_CODE_VND_ACTION_IN_PROGRESS;
+
vDebug("vgId:%d, QInfo:%p, dnode query msg in progress", pVnode->vgId, pQInfo);
}
if (pQInfo != NULL) {
- qTableQuery(pQInfo, vnodeRelease, pVnode); // do execute query
+ qTableQuery(pQInfo); // do execute query
+ assert(handle != NULL);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
}
return code;
@@ -148,46 +160,69 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
SRspRet *pRet = &pReadMsg->rspRet;
SRetrieveTableMsg *pRetrieve = pCont;
- void *pQInfo = (void*) htobe64(pRetrieve->qhandle);
+ void **pQInfo = (void*) htobe64(pRetrieve->qhandle);
pRetrieve->free = htons(pRetrieve->free);
- memset(pRet, 0, sizeof(SRspRet));
-
- if (pRetrieve->free == 1) {
- vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
- int32_t ret = qKillQuery(pQInfo, vnodeRelease, pVnode);
+ vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, *pQInfo);
- pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
- pRet->len = sizeof(SRetrieveTableRsp);
+ memset(pRet, 0, sizeof(SRspRet));
+ int32_t ret = 0;
- memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
- SRetrieveTableRsp* pRsp = pRet->rsp;
- pRsp->numOfRows = 0;
- pRsp->completed = true;
- pRsp->useconds = 0;
+ void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo);
+ if (handle == NULL || handle != pQInfo) {
+ ret = TSDB_CODE_QRY_INVALID_QHANDLE;
+ }
+ if (pRetrieve->free == 1) {
+ if (ret == TSDB_CODE_SUCCESS) {
+ vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
+
+ pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
+ pRet->len = sizeof(SRetrieveTableRsp);
+
+ memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
+ SRetrieveTableRsp* pRsp = pRet->rsp;
+ pRsp->numOfRows = 0;
+ pRsp->completed = true;
+ pRsp->useconds = 0;
+ } else { // todo handle error
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
+ }
return ret;
}
- vDebug("vgId:%d, QInfo:%p, retrieve msg is received", pVnode->vgId, pQInfo);
-
- int32_t code = qRetrieveQueryResultInfo(pQInfo);
- if (code != TSDB_CODE_SUCCESS) {
+ int32_t code = qRetrieveQueryResultInfo(*pQInfo);
+ if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) {
//TODO
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
+
} else {
// todo check code and handle error in build result set
- code = qDumpRetrieveResult(pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
+ code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
- if (qHasMoreResultsToRetrieve(pQInfo)) {
- pRet->qhandle = pQInfo;
- code = TSDB_CODE_VND_ACTION_NEED_REPROCESSED;
+ if (qHasMoreResultsToRetrieve(*handle)) {
+ dnodePutItemIntoReadQueue(pVnode, handle);
+ pRet->qhandle = handle;
+ code = TSDB_CODE_SUCCESS;
} else { // no further execution invoked, release the ref to vnode
- qDestroyQueryInfo(pQInfo, vnodeRelease, pVnode);
+ qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
}
}
-
- vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed", pVnode->vgId, pQInfo);
+
return code;
}
+
+// notify connection(handle) that current qhandle is created, if current connection from
+// client is broken, the query needs to be killed immediately.
+int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
+ SRetrieveTableMsg* killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg));
+ killQueryMsg->qhandle = htobe64((uint64_t) qhandle);
+ killQueryMsg->free = htons(1);
+ killQueryMsg->header.vgId = htonl(vgId);
+ killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg));
+
+ vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle);
+ return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
+}
\ No newline at end of file
diff --git a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
index f6728359e5e58303d48b21c71325d9c0d5c6fe89..4e307db07935be293e4ec74566aa158fb1fece9e 100644
--- a/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
+++ b/tests/comparisonTest/opentsdb/opentsdbtest/pom.xml
@@ -94,7 +94,7 @@
com.google.guava
guava
- 18.0
+ 24.1.1
diff --git a/tests/pytest/import_merge/importDataLastSub.py b/tests/pytest/import_merge/importDataLastSub.py
index 5c2069c90f52b4c786739f4d58e1ce41185c0f93..bfcad2d252cccb2404b3989c474310e0a19afe2e 100644
--- a/tests/pytest/import_merge/importDataLastSub.py
+++ b/tests/pytest/import_merge/importDataLastSub.py
@@ -32,6 +32,7 @@ class TDTestCase:
tdDnodes.stop(1)
tdDnodes.deploy(1)
tdDnodes.start(1)
+ tdLog.sleep(5)
tdSql.execute('reset query cache')
tdSql.execute('drop database if exists db')
@@ -61,6 +62,7 @@ class TDTestCase:
tdDnodes.stop(1)
tdLog.sleep(5)
tdDnodes.start(1)
+ tdLog.sleep(5)
tdLog.info("================= step5")
tdLog.info("import 10 data totally repetitive")
diff --git a/tests/pytest/query/queryMetaData.py b/tests/pytest/query/queryMetaData.py
index 8fb9d9bf3a7a1648607f7943ac807a2ed69d8222..7b95e4a81cb3807062050099008aa3f73fbb4dab 100755
--- a/tests/pytest/query/queryMetaData.py
+++ b/tests/pytest/query/queryMetaData.py
@@ -22,7 +22,7 @@ class MetadataQuery:
def initConnection(self):
self.tables = 100000
self.records = 10
- self.numOfTherads = 10
+ self.numOfTherads = 20
self.ts = 1537146000000
self.host = "127.0.0.1"
self.user = "root"
@@ -55,10 +55,10 @@ class MetadataQuery:
def createTablesAndInsertData(self, threadID):
cursor = self.connectDB()
- cursor.execute("use test")
- base = threadID * self.tables
+ cursor.execute("use test")
tablesPerThread = int (self.tables / self.numOfTherads)
+ base = threadID * tablesPerThread
for i in range(tablesPerThread):
cursor.execute(
'''create table t%d using meters tags(
@@ -75,12 +75,11 @@ class MetadataQuery:
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
(base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
- for j in range(self.records):
- cursor.execute(
- "insert into t%d values(%d, %d)" %
- (base + i + 1, self.ts + j, j))
- cursor.close()
- self.conn.close()
+
+ cursor.execute(
+ "insert into t%d values(%d, 1) (%d, 2) (%d, 3) (%d, 4) (%d, 5)" %
+ (base + i + 1, self.ts + 1, self.ts + 2, self.ts + 3, self.ts + 4, self.ts + 5))
+ cursor.close()
def queryData(self, query):
cursor = self.connectDB()
@@ -108,12 +107,17 @@ if __name__ == '__main__':
print(
"================= Create %d tables and insert %d records into each table =================" %
(t.tables, t.records))
- startTime = datetime.now()
+ startTime = datetime.now()
+ threads = []
for i in range(t.numOfTherads):
thread = threading.Thread(
target=t.createTablesAndInsertData, args=(i,))
thread.start()
- thread.join()
+ threads.append(thread)
+
+ for th in threads:
+ th.join()
+
endTime = datetime.now()
diff = (endTime - startTime).seconds
print(
diff --git a/tests/pytest/query/queryMetaPerformace.py b/tests/pytest/query/queryMetaPerformace.py
new file mode 100644
index 0000000000000000000000000000000000000000..0570311b08bf0c50667897b3f7901784cbb17ff5
--- /dev/null
+++ b/tests/pytest/query/queryMetaPerformace.py
@@ -0,0 +1,149 @@
+
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+import threading
+import time
+from datetime import datetime
+import numpy as np
+
+class MyThread(threading.Thread):
+
+ def __init__(self, func, args=()):
+ super(MyThread, self).__init__()
+ self.func = func
+ self.args = args
+
+ def run(self):
+ self.result = self.func(*self.args)
+
+ def get_result(self):
+ try:
+ return self.result # 如果子线程不使用join方法,此处可能会报没有self.result的错误
+ except Exception:
+ return None
+
+class MetadataQuery:
+ def initConnection(self):
+ self.tables = 100
+ self.records = 10
+ self.numOfTherads =5
+ self.ts = 1537146000000
+ self.host = "127.0.0.1"
+ self.user = "root"
+ self.password = "taosdata"
+ self.config = "/etc/taos"
+ self.conn = taos.connect( self.host, self.user, self.password, self.config)
+ def connectDB(self):
+ return self.conn.cursor()
+
+ def createStable(self):
+ print("================= Create stable meters =================")
+ cursor = self.connectDB()
+ cursor.execute("drop database if exists test")
+ cursor.execute("create database test")
+ cursor.execute("use test")
+ cursor.execute('''create table if not exists meters (ts timestamp, speed int) tags(
+ tgcol1 tinyint, tgcol2 smallint, tgcol3 int, tgcol4 bigint, tgcol5 float, tgcol6 double, tgcol7 bool, tgcol8 binary(20), tgcol9 nchar(20),
+ tgcol10 tinyint, tgcol11 smallint, tgcol12 int, tgcol13 bigint, tgcol14 float, tgcol15 double, tgcol16 bool, tgcol17 binary(20), tgcol18 nchar(20),
+ tgcol19 tinyint, tgcol20 smallint, tgcol21 int, tgcol22 bigint, tgcol23 float, tgcol24 double, tgcol25 bool, tgcol26 binary(20), tgcol27 nchar(20),
+ tgcol28 tinyint, tgcol29 smallint, tgcol30 int, tgcol31 bigint, tgcol32 float, tgcol33 double, tgcol34 bool, tgcol35 binary(20), tgcol36 nchar(20),
+ tgcol37 tinyint, tgcol38 smallint, tgcol39 int, tgcol40 bigint, tgcol41 float, tgcol42 double, tgcol43 bool, tgcol44 binary(20), tgcol45 nchar(20),
+ tgcol46 tinyint, tgcol47 smallint, tgcol48 int, tgcol49 bigint, tgcol50 float, tgcol51 double, tgcol52 bool, tgcol53 binary(20), tgcol54 nchar(20))''')
+ cursor.close()
+
+ def createTablesAndInsertData(self, threadID):
+ cursor = self.connectDB()
+ cursor.execute("use test")
+ base = threadID * self.tables
+
+ tablesPerThread = int (self.tables / self.numOfTherads)
+ for i in range(tablesPerThread):
+ cursor.execute(
+ '''create table t%d using meters tags(
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
+ %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' %
+ (base + i + 1,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
+ (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
+ for j in range(self.records):
+ cursor.execute(
+ "insert into t%d values(%d, %d)" %
+ (base + i + 1, self.ts + j, j))
+ cursor.close()
+ def queryWithTagId(self, threadId, tagId, queryNum):
+ print("---------thread%d start-----------"%threadId)
+ query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9,
+ tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18,
+ tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27,
+ tgcol28, tgcol29, tgcol30, tgcol31, tgcol32, tgcol33, tgcol34, tgcol35, tgcol36,
+ tgcol37, tgcol38, tgcol39, tgcol40, tgcol41, tgcol42, tgcol43, tgcol44, tgcol45,
+ tgcol46, tgcol47, tgcol48, tgcol49, tgcol50, tgcol51, tgcol52, tgcol53, tgcol54
+ from meters where tgcol{id} > {condition}'''
+ latancy = []
+ cursor = self.connectDB()
+ cursor.execute("use test")
+ for i in range(queryNum):
+ startTime = time.time()
+ cursor.execute(query.format(id = tagId, condition = i))
+ cursor.fetchall()
+ latancy.append((time.time() - startTime))
+ print("---------thread%d end-----------"%threadId)
+ return latancy
+ def queryData(self, query):
+ cursor = self.connectDB()
+ cursor.execute("use test")
+
+ print("================= query tag data =================")
+ startTime = datetime.now()
+ cursor.execute(query)
+ cursor.fetchall()
+ endTime = datetime.now()
+ print(
+ "Query time for the above query is %d seconds" %
+ (endTime - startTime).seconds)
+
+ cursor.close()
+ #self.conn.close()
+
+
+if __name__ == '__main__':
+
+ t = MetadataQuery()
+ t.initConnection()
+
+ latancys = []
+ threads = []
+ tagId = 1
+ queryNum = 1000
+ for i in range(t.numOfTherads):
+ thread = MyThread(t.queryWithTagId, args = (i, tagId, queryNum))
+ threads.append(thread)
+ thread.start()
+ for i in range(t.numOfTherads):
+ threads[i].join()
+ latancys.extend(threads[i].get_result())
+ print("Total query: %d"%(queryNum * t.numOfTherads))
+ print("statistic(s): mean= %f, P50 = %f, P75 = %f, P95 = %f, P99 = %f"
+ %(sum(latancys)/(queryNum * t.numOfTherads), np.percentile(latancys, 50), np.percentile(latancys, 75), np.percentile(latancys, 95), np.percentile(latancys, 99)))
+
diff --git a/tests/pytest/regressiontest.sh b/tests/pytest/regressiontest.sh
index 8ce1fd1294ffca8c5aaf2d036f953ae16e2a4952..eada5f67f7dae0d9395d869cfd0e368bb5d71f5a 100755
--- a/tests/pytest/regressiontest.sh
+++ b/tests/pytest/regressiontest.sh
@@ -137,6 +137,7 @@ python3 ./test.py -f query/filterFloatAndDouble.py
python3 ./test.py -f query/filterOtherTypes.py
python3 ./test.py -f query/queryError.py
python3 ./test.py -f query/querySort.py
+python3 ./test.py -f query/queryJoin.py
#stream
python3 ./test.py -f stream/stream1.py
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index 5b35563e1bcb041fdcb2389c4609d91b3fa71e1a..a9da8e5671a588409954735f79c7323136aa5c4c 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -81,7 +81,7 @@ if __name__ == "__main__":
else:
toBeKilled = "valgrind.bin"
- killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP " % toBeKilled
+ killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
@@ -91,8 +91,17 @@ if __name__ == "__main__":
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
- fuserCmd = "fuser -k -n tcp 6030"
- os.system(fuserCmd)
+ for port in range(6030, 6041):
+ usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
+ processID = subprocess.check_output(usePortPID, shell=True)
+
+ if processID:
+ killCmd = "kill -9 %s" % processID
+ os.system(killCmd)
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+ if valgrind:
+ time.sleep(2)
tdLog.info('stop All dnodes')
sys.exit(0)
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index e24af473f38274d3a1122362d037fc00099c1528..370af1ba136c0a24ae5c8e89f87291fa2a624fc0 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -251,11 +251,16 @@ class TDDnode:
psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -INT %s" % processID
+ killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
+ for port in range(6030, 6041):
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+ if self.valgrind:
+ time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
@@ -272,11 +277,16 @@ class TDDnode:
psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
+ for port in range(6030, 6041):
+ fuserCmd = "fuser -k -n tcp %d" % port
+ os.system(fuserCmd)
+ if self.valgrind:
+ time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
@@ -325,7 +335,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@@ -334,7 +344,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@@ -440,7 +450,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
@@ -449,7 +459,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
- killCmd = "kill -KILL %s" % processID
+ killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
diff --git a/tests/script/general/http/grafana.sim b/tests/script/general/http/grafana.sim
index dac7552edf710952ef6539e8ec74f57419df9bda..bf2a3b3b58a9469c18ec17c3c27b4fa45ff9813f 100644
--- a/tests/script/general/http/grafana.sim
+++ b/tests/script/general/http/grafana.sim
@@ -54,43 +54,43 @@ print =============== step2 - login
system_content curl 127.0.0.1:6020/grafana/
print 1-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/grafana/xx
print 2-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/grafana/login/xx/xx/
print 3-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"mnode invalid user"}@ then
+if $system_content != @{"status":"error","code":849,"desc":"mnode invalid user"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/grafana/root/1/123/1/1/3
print 4-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/grafana/login/1/root/1/
print 5-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"mnode invalid user"}@ then
+if $system_content != @{"status":"error","code":849,"desc":"mnode invalid user"}@ then
return -1
endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6020/grafana/root/1/login
print 6-> $system_content
-if $system_content != @{"status":"error","code":1010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6020/grafana/root/1/login
print 7-> $system_content
-if $system_content != @{"status":"error","code":1010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
return -1
endi
diff --git a/tests/script/general/http/restful.sim b/tests/script/general/http/restful.sim
index 5ee8bde46fd38aa99508673a7dded5bef3c630a2..7f39a9c74824b2696c59d1b8c179e776bb87214d 100644
--- a/tests/script/general/http/restful.sim
+++ b/tests/script/general/http/restful.sim
@@ -77,7 +77,7 @@ endi
system_content curl 127.0.0.1:6020/rest/login/u2/aabcd_1234
print curl 127.0.0.1:6020/rest/login/u2/abcd_1234 -----> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"auth failure"}@ then
+if $system_content != @{"status":"error","code":3,"desc":"auth failure"}@ then
return -1
endi
diff --git a/tests/script/general/http/restful_full.sim b/tests/script/general/http/restful_full.sim
index aa667daee48b10b8759a55def48eb6ac6dfff44e..60ffa4cb2814ae7bf495b87b05a2f41197d36ef0 100644
--- a/tests/script/general/http/restful_full.sim
+++ b/tests/script/general/http/restful_full.sim
@@ -14,57 +14,57 @@ print =============== step1 - login
system_content curl 127.0.0.1:6020/rest/
print 1-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/rest/xx
print 2-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/rest/login
print 3-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
#4
system_content curl 127.0.0.1:6020/rest/login/root
print 4-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/rest/login/root/123
print 5-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"auth failure"}@ then
+if $system_content != @{"status":"error","code":3,"desc":"auth failure"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/rest/login/root/123/1/1/3
print 6-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"auth failure"}@ then
+if $system_content != @{"status":"error","code":3,"desc":"auth failure"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'show databases' 127.0.0.1:6020/rest/login/root/1
print 7-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"auth failure"}@ then
+if $system_content != @{"status":"error","code":3,"desc":"auth failure"}@ then
return -1
endi
#8
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6020/rest/login/root/1
print 8-> $system_content
-if $system_content != @{"status":"error","code":1010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' -d 'show databases' 127.0.0.1:6020/rest/login/root/1
print 9-> $system_content
-if $system_content != @{"status":"error","code":1010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
return -1
endi
@@ -93,40 +93,40 @@ endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'create database d1' 127.0.0.1:6020/rest/sql
print 13-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"mnode database aleady exist"}@ then
+if $system_content != @{"status":"error","code":897,"desc":"mnode database aleady exist"}@ then
return -1
endi
#14
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '' 127.0.0.1:6020/rest/sql
print 14-> $system_content
-if $system_content != @{"status":"error","code":1012,"desc":"no sql input"}@ then
+if $system_content != @{"status":"error","code":5012,"desc":"no sql input"}@ then
return -1
endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1' 127.0.0.1:6020/rest/sql
#print 15-> $system_content
-#if $system_content != @{"status":"error","code":1017,"desc":"no need to execute use db cmd"}@ then
+#if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
#if $system_content != @{"status":"succ","head":["affected_rows"],"data":[[1]],"rows":1}@ then
# return -1
#endi
#system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' use d1' 127.0.0.1:6020/rest/sql
#print 16-> $system_content
-#if $system_content != @{"status":"error","code":1017,"desc":"no need to execute use db cmd"}@ then
+#if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
# return -1
#endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' used1' 127.0.0.1:6020/rest/sql
print 17-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"invalid SQL: invalid SQL: syntax error near 'used1'"}@ then
+if $system_content != @{"status":"error","code":512,"desc":"invalid SQL: invalid SQL: syntax error near 'used1'"}@ then
return -1
endi
#18
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' show tables;' 127.0.0.1:6020/rest/sql
print 18-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"mnode db not selected"}@ then
+if $system_content != @{"status":"error","code":896,"desc":"mnode db not selected"}@ then
return -1
endi
@@ -147,7 +147,7 @@ print =============== step3 - db
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d ' select * from d1.t1;' 127.0.0.1:6020/rest/sql
print 21-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"mnode invalid table name"}@ then
+if $system_content != @{"status":"error","code":866,"desc":"mnode invalid table name"}@ then
return -1
endi
diff --git a/tests/script/general/http/telegraf.sim b/tests/script/general/http/telegraf.sim
index d9f3340f06fa80dad2291ceca6d17af07a4be923..1ed73f04ad60a81dcc029b61edce002e244ef496 100644
--- a/tests/script/general/http/telegraf.sim
+++ b/tests/script/general/http/telegraf.sim
@@ -16,224 +16,224 @@ print =============== step1 - parse
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/
print $system_content
-if $system_content != @{"status":"error","code":1022,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/
print $system_content
-if $system_content != @{"status":"error","code":1022,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":5022,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'select * from d1.table_admin' -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/d123456789012345678901234567890123456
print $system_content
-if $system_content != @{"status":"error","code":1023,"desc":"database name too long"}@ then
+if $system_content != @{"status":"error","code":5023,"desc":"database name too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[]' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"metrics": []}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1025,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"metrics": [{}]}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"metrics": 12}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1025,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":5025,"desc":"metrics size is 0"}@ then
return -1
endi
#system_content curl -u root:taosdata -d '{"metrics": [{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}]}' 127.0.0.1:6020/telegraf/db/root/taosdata1
#print $system_content
-#if $system_content != @{"status":"error","code":1026,"desc":"metrics size can not more than 50"}@ then
+#if $system_content != @{"status":"error","code":5026,"desc":"metrics size can not more than 50"}@ then
# return -1
#endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1027,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":5027,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":111,"tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1028,"desc":"metric name type should be string"}@ then
+if $system_content != @{"status":"error","code":5028,"desc":"metric name type should be string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1029,"desc":"metric name length is 0"}@ then
+if $system_content != @{"status":"error","code":5029,"desc":"metric name length is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234a1234567890123456789012345678901234","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1030,"desc":"metric name length too long"}@ then
+if $system_content != @{"status":"error","code":5030,"desc":"metric name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"}}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1031,"desc":"timestamp not find"}@ then
+if $system_content != @{"status":"error","code":5031,"desc":"timestamp not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":""}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1032,"desc":"timestamp type should be integer"}@ then
+if $system_content != @{"status":"error","code":5032,"desc":"timestamp type should be integer"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":-1}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1033,"desc":"timestamp value smaller than 0"}@ then
+if $system_content != @{"status":"error","code":5033,"desc":"timestamp value smaller than 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1034,"desc":"tags not find"}@ then
+if $system_content != @{"status":"error","code":5034,"desc":"tags not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1035,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":"","timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1035,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then
return -1
endi
#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor","host":"windows","instance":"1","objectname":"Processor","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata
#print $system_content
-#if $system_content != @{"status":"error","code":1036,"desc":"tags size too long"}@ then
+#if $system_content != @{"status":"error","code":5036,"desc":"tags size too long"}@ then
# return -1
#endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1035,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":5035,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"":"windows"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1038,"desc":"tag name is null"}@ then
+if $system_content != @{"status":"error","code":5038,"desc":"tag name is null"}@ then
return -1
endi
#system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host111111111111222222222222222222222":""},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
#print $system_content
-#if $system_content != @{"status":"error","code":1039,"desc":"tag name length too long"}@ then
+#if $system_content != @{"status":"error","code":5039,"desc":"tag name length too long"}@ then
# return -1
#endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":true},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1040,"desc":"tag value type should be number or string"}@ then
+if $system_content != @{"status":"error","code":5040,"desc":"tag value type should be number or string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":""},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1041,"desc":"tag value is null"}@ then
+if $system_content != @{"status":"error","code":5041,"desc":"tag value is null"}@ then
return -1
endi
-system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"1022":"111"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
+system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"5022":"111"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1042,"desc":"table is null"}@ then
+if $system_content != @{"status":"error","code":5042,"desc":"table is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222host111111111111222222222222222222222"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1043,"desc":"table name length too long"}@ then
+if $system_content != @{"status":"error","code":5043,"desc":"table name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1045,"desc":"fields size is 0"}@ then
+if $system_content != @{"status":"error","code":5045,"desc":"fields size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"":0,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1048,"desc":"field name is null"}@ then
+if $system_content != @{"status":"error","code":5048,"desc":"field name is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":"","Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1051,"desc":"field value is null"}@ then
+if $system_content != @{"status":"error","code":5051,"desc":"field value is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{"fields":{"Percent_DPC_Time":true,"Percent_Idle_Time":95.59830474853516,"Percent_Interrupt_Time":0,"Percent_Privileged_Time":0,"Percent_Processor_Time":0,"Percent_User_Time":0},"name":"win_cpu","tags":{"host":"windows","instance":"1","objectname":"Processor"},"timestamp":1535784122}' 127.0.0.1:6020/telegraf/db/root/taosdata1
print $system_content
-if $system_content != @{"status":"error","code":1050,"desc":"field value type should be number or string"}@ then
+if $system_content != @{"status":"error","code":5050,"desc":"field value type should be number or string"}@ then
return -1
endi
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index e2ebd9af637f9eda6e45a72ce3b0437457ec06cc..3c4733a25b8b20e6a15686a14a2560f6e6b5b599 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -365,3 +365,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
diff --git a/tests/script/jenkins/unique.txt b/tests/script/jenkins/unique.txt
index afd0ea55c00e4fd20a1dd0eb87851f0660c38f90..06edb8890a9f8b9c246703397cec3718313f637d 100644
--- a/tests/script/jenkins/unique.txt
+++ b/tests/script/jenkins/unique.txt
@@ -133,3 +133,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
+./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
diff --git a/tests/script/regressionSuite.sim b/tests/script/regressionSuite.sim
index f4ffb5ea9e6655319e5fa9d0a7b0e31133497405..ff1f9f53559c6da89ed2cdaf77c29f1859578ae6 100644
--- a/tests/script/regressionSuite.sim
+++ b/tests/script/regressionSuite.sim
@@ -124,7 +124,7 @@ run general/parser/slimit.sim
run general/parser/fill.sim
run general/parser/fill_stb.sim
run general/parser/interp.sim
-# run general/parser/where.sim
+run general/parser/where.sim
run general/parser/join.sim
run general/parser/join_multivnode.sim
run general/parser/select_with_tags.sim
diff --git a/tests/script/sh/deploy.sh b/tests/script/sh/deploy.sh
index ee7093a2ca342059510e95870f83fc35850cd2b2..eb0a9b526de8f4383df5407efe17d85fdc07a745 100755
--- a/tests/script/sh/deploy.sh
+++ b/tests/script/sh/deploy.sh
@@ -113,7 +113,8 @@ echo "logDir $LOG_DIR" >> $TAOS_CFG
echo "mDebugFlag 135" >> $TAOS_CFG
echo "sdbDebugFlag 135" >> $TAOS_CFG
echo "dDebugFlag 135" >> $TAOS_CFG
-echo "vDebugFlag 143" >> $TAOS_CFG
+echo "vDebugFlag 135" >> $TAOS_CFG
+echo "tsdbDebugFlag 135" >> $TAOS_CFG
echo "cDebugFlag 135" >> $TAOS_CFG
echo "jnidebugFlag 135" >> $TAOS_CFG
echo "odbcdebugFlag 135" >> $TAOS_CFG
@@ -132,7 +133,7 @@ echo "monitorInterval 1" >> $TAOS_CFG
echo "http 0" >> $TAOS_CFG
echo "numOfThreadsPerCore 2.0" >> $TAOS_CFG
echo "defaultPass taosdata" >> $TAOS_CFG
-echo "numOfLogLines 100000000" >> $TAOS_CFG
+echo "numOfLogLines 10000000" >> $TAOS_CFG
echo "mnodeEqualVnodeNum 0" >> $TAOS_CFG
echo "clog 2" >> $TAOS_CFG
echo "statusInterval 1" >> $TAOS_CFG
diff --git a/tests/script/unique/http/admin.sim b/tests/script/unique/http/admin.sim
index 3a480b6ebed9c8021989f98af385780d54dbf946..10fad091b53e2012e42275e8b99734f2ad2c502e 100644
--- a/tests/script/unique/http/admin.sim
+++ b/tests/script/unique/http/admin.sim
@@ -33,49 +33,49 @@ print =============== step1 - login
system_content curl 127.0.0.1:6020/admin/
print 1-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/admin/xx
print 2-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/admin/login
print 3-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/admin/login/root
print 4-> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/admin/login/root/123
print 5-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"auth failure"}@ then
+if $system_content != @{"status":"error","code":3,"desc":"auth failure"}@ then
return -1
endi
system_content curl 127.0.0.1:6020/admin/login/root/123/1/1/3
print 6-> $system_content
-if $system_content != @{"status":"error","code":1000,"desc":"auth failure"}@ then
+if $system_content != @{"status":"error","code":3,"desc":"auth failure"}@ then
return -1
endi
system_content curl -H 'Authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.' -d 'show databases' 127.0.0.1:6020/admin/login/root/1
print 7-> $system_content
-if $system_content != @{"status":"error","code":1010,"desc":"invalid type of Authorization"}@ then
+if $system_content != @{"status":"error","code":5010,"desc":"invalid type of Authorization"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3d3cudGFvc2RhdGEuY29tIiwicGFzcyI6InRhb3NkYXRhIiwic3ViIjoicm9vdCJ9.xPv3b5odlR7YF8G_QWASjIRbMtA5v4ItToJ35fFgi' 127.0.0.1:6020/admin/login/root/1
print 8-> $system_content
-if $system_content != @{"status":"error","code":1053,"desc":"parse http auth token error"}@ then
+if $system_content != @{"status":"error","code":5053,"desc":"parse http auth token error"}@ then
return -1
endi
@@ -105,7 +105,7 @@ endi
system_content curl 127.0.0.1:6020/admin/logout
print 11 -----> $system_content
-if $system_content != @{"status":"error","code":1011,"desc":"no auth info input"}@ then
+if $system_content != @{"status":"error","code":5011,"desc":"no auth info input"}@ then
return -1
endi
@@ -168,7 +168,7 @@ print =============== step7 - use dbs
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d 'use d1;' 127.0.0.1:6020/admin/all
print 23-> $system_content
-if $system_content != @{"status":"error","code":1017,"desc":"no need to execute use db cmd"}@ then
+if $system_content != @{"status":"error","code":5017,"desc":"no need to execute use db cmd"}@ then
return -1
endi
diff --git a/tests/script/unique/http/opentsdb.sim b/tests/script/unique/http/opentsdb.sim
index 2254303e9e29f5de9c91741134af4691d60d5c99..7e2400bb156b86019ad7af600875d8532440a64c 100644
--- a/tests/script/unique/http/opentsdb.sim
+++ b/tests/script/unique/http/opentsdb.sim
@@ -3,7 +3,6 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
-system sh/cfg.sh -n dnode1 -c httpDebugFlag -v 135
system sh/exec.sh -n dnode1 -s start
sleep 3000
@@ -14,92 +13,92 @@ print ============================ dnode1 start
print =============== step1 - parse
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/
print $system_content
-if $system_content != @{"status":"error","code":1057,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db123456789012345678901234567890db
print $system_content
-if $system_content != @{"status":"error","code":1058,"desc":"database name too long"}@ then
+if $system_content != @{"status":"error","code":5058,"desc":"database name too long"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/
print $system_content
-if $system_content != @{"status":"error","code":1057,"desc":"database name can not be null"}@ then
+if $system_content != @{"status":"error","code":5057,"desc":"database name can not be null"}@ then
return -1
endi
system_content curl -H 'Authorization: Taosd /KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04' -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put2
print $system_content
-if $system_content != @{"status":"error","code":1009,"desc":"http url parse error"}@ then
+if $system_content != @{"status":"error","code":5009,"desc":"http url parse error"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1060,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1059,"desc":"invalid opentsdb json fromat"}@ then
+if $system_content != @{"status":"error","code":5059,"desc":"invalid opentsdb json fromat"}@ then
return -1
endi
system_content curl -u root:taosdata -d '{}' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1060,"desc":"metrics size is 0"}@ then
+if $system_content != @{"status":"error","code":5060,"desc":"metrics size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1062,"desc":"metric name not find"}@ then
+if $system_content != @{"status":"error","code":5062,"desc":"metric name not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": 1,"timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1063,"desc":"metric name type should be string"}@ then
+if $system_content != @{"status":"error","code":5063,"desc":"metric name type should be string"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1064,"desc":"metric name length is 0"}@ then
+if $system_content != @{"status":"error","code":5064,"desc":"metric name length is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "ab1234567890123456789012345678ab1234567890123456789012345678","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":-2147482101,"desc":"tsdb timestamp is out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then
+if $system_content != @{"errors":[{"datapoint":{"metric":"ab1234567890123456789012345678ab1234567890123456789012345678","stable":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb","table":"ab1234567890123456789012345678ab1234567890123456789012345678_d_bbb_lga_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"status":"error","code":1547,"desc":"tsdb timestamp is out of range"}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1066,"desc":"timestamp not find"}@ then
+if $system_content != @{"status":"error","code":5066,"desc":"timestamp not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": "2","value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1067,"desc":"timestamp type should be integer"}@ then
+if $system_content != @{"status":"error","code":5067,"desc":"timestamp type should be integer"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": -1,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1068,"desc":"timestamp value smaller than 0"}@ then
+if $system_content != @{"status":"error","code":5068,"desc":"timestamp value smaller than 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1078,"desc":"value not find"}@ then
+if $system_content != @{"status":"error","code":5078,"desc":"value not find"}@ then
return -1
endi
@@ -107,49 +106,49 @@ endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1069,"desc":"tags not find"}@ then
+if $system_content != @{"status":"error","code":5069,"desc":"tags not find"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1070,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": 0}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1070,"desc":"tags size is 0"}@ then
+if $system_content != @{"status":"error","code":5070,"desc":"tags size is 0"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web01","group1": "1","group1": "1","group1": "1","group1": "1","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":-2147482782,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then
+if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbbbbbb","table":"sys_cpu_d_bbbbbbb_lga_1_1_1_1_1_web01","timestamp":1346846400,"value":18.000000,"tags":{"dc":"lga","group1":"1","group1":"1","group1":"1","group1":"1","group1":"1","host":"web01"},"status":"error","code":866,"desc":"failed to create table"}}],"failed":1,"success":0,"affected_rows":0}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"": "web01"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1073,"desc":"tag name is null"}@ then
+if $system_content != @{"status":"error","code":5073,"desc":"tag name is null"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host01123456789001123456789001123456789001123456789001123456789001123456789": "01"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1074,"desc":"tag name length too long"}@ then
+if $system_content != @{"status":"error","code":5074,"desc":"tag name length too long"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": "web011234567890011234567890011234567890011234567890011234567890011234567890011234567890011234567890"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1077,"desc":"tag value can not more than 64"}@ then
+if $system_content != @{"status":"error","code":5077,"desc":"tag value can not more than 64"}@ then
return -1
endi
system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846400,"value": 18,"tags": {"host": ""}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"status":"error","code":1076,"desc":"tag value is null"}@ then
+if $system_content != @{"status":"error","code":5076,"desc":"tag value is null"}@ then
return -1
endi
@@ -175,11 +174,11 @@ if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-
endi
print =============== step3 - multi-query data
-system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846401000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
+system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 1346846405000,"value": 18,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_cpu","timestamp": 1346846402000,"value": 18,"tags": {"host": "web02","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put
print $system_content
-if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846401000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then
+if $system_content != @{"errors":[{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web01","timestamp":1346846405000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web01"},"affected_rows":1,"status":"succ"}},{"datapoint":{"metric":"sys_cpu","stable":"sys_cpu_d_bbb","table":"sys_cpu_d_bbb_lga_1_web02","timestamp":1346846402000,"value":18.000000,"tags":{"dc":"lga","group1":"1","host":"web02"},"affected_rows":1,"status":"succ"}}],"failed":0,"success":2,"affected_rows":2}@ then
return -1
endi
@@ -187,7 +186,7 @@ system_content curl -u root:taosdata -d 'select * from db.sys_cpu_d_bbb_lga_1_w
print $system_content
-if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:01.000",18.000000000]],"rows":2}@ then
+if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",18.000000000],["2012-09-05 20:00:05.000",18.000000000]],"rows":2}@ then
return -1
endi
@@ -200,7 +199,7 @@ if $system_content != @{"status":"succ","head":["count(*)"],"data":[[3]],"rows":
endi
print =============== step4 - summary-put data
-system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846401000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put?details=false
+system_content curl -u root:taosdata -d '[{"metric": "sys_mem","timestamp": 1346846400000,"value": 8,"tags": {"host": "web01","group1": "1","dc": "lga"}},{"metric": "sys_mem","timestamp": 1346846405000,"value": 9,"tags": {"host": "web01","group1": "1","dc": "lga"}}]' 127.0.0.1:6020/opentsdb/db/put?details=false
print $system_content
@@ -212,7 +211,7 @@ system_content curl -u root:taosdata -d 'select * from db.sys_mem_d_bbb_lga_1_w
print $system_content
-if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:01.000",9.000000000]],"rows":2}@ then
+if $system_content != @{"status":"succ","head":["ts","value"],"data":[["2012-09-05 20:00:00.000",8.000000000],["2012-09-05 20:00:05.000",9.000000000]],"rows":2}@ then
return -1
endi
@@ -234,7 +233,7 @@ system_content curl -u root:taosdata -d '[{"metric": "sys_cpu","timestamp": 134
system_content curl -u root:taosdata -d 'select count(*) from db.sys_cpu_d_bbb' 127.0.0.1:6020/rest/sql/
print $system_content
-if $system_content != @{"status":"succ","head":["count(*)"],"data":[[8]],"rows":1}@ then
+if $system_content != @{"status":"succ","head":["count(*)"],"data":[[7]],"rows":1}@ then
return -1
endi
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
new file mode 100644
index 0000000000000000000000000000000000000000..e0b5e9b93102a5f1e656300bd5036dd869c9eff3
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
@@ -0,0 +1,272 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and add them into cluster
+# step 2: create db(repl = 2), table, insert data
+# step 3: stop dnode1, remove its mnode dir, and copy mnode dir of dnode2 to dnode1
+# step 4: restart dnode1, waiting sync end
+# step 5: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data (now-20d and now-40d), control data rows in order to keep them in cache, not falling to disk
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode1_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != offline then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+#sql show vgroups
+#print show vgroups:
+#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+
+print ============== step5: remove the mnode dir of dnode1, then copy the mnode dir of dnode2
+system_content rm -rf ../../../sim/dnode1/data/mnode
+system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1, waiting sync end
+system sh/exec.sh -n dnode1 -s start
+sleep 1000
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+print ============== step7: stop dnode2
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode2_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
new file mode 100644
index 0000000000000000000000000000000000000000..ae7fc6af170637f5c157b898dfc90b8502ca15cd
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
@@ -0,0 +1,274 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and add them into cluster
+# step 2: create db(repl = 2), table, insert data
+# step 3: stop dnode1, remove its mnode and vnode dirs, and copy mnode and vnode dirs of dnode2 to dnode1
+# step 4: restart dnode1, waiting sync end
+# step 5: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data (now-20d and now-40d), control data rows in order to keep them in cache, not falling to disk
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode1_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != offline then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+#sql show vgroups
+#print show vgroups:
+#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+
+print ============== step5: remove the mnode and vnode dirs of dnode1, then copy the mnode and vnode dirs of dnode2
+system_content rm -rf ../../../sim/dnode1/data/vnode
+system_content rm -rf ../../../sim/dnode1/data/mnode
+system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
+system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1, waiting sync end
+system sh/exec.sh -n dnode1 -s start
+sleep 1000
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+print ============== step7: stop dnode2
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode2_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
new file mode 100644
index 0000000000000000000000000000000000000000..dc9bc6269683b3e59bf97b6f38fd16f7af6ff1d0
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
@@ -0,0 +1,210 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and added into cluster
+# step 2: create db(repl = 2), table, insert data,
+# step 4: stop dnode1, remove its mnode and vnode dir, and copy mnode and vnode dir of dnode2 to dnode1
+# step 5: restart dnode1, waiting sync end
+# step 6: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+print ============== step5: remove the mnode dir of dnode1, then copy the monde dir of dnode2
+system_content rm -rf ../../../sim/dnode1/data/vnode
+system_content rm -rf ../../../sim/dnode1/data/mnode
+system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
+system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1/dnode2
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sql use $db
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim b/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
new file mode 100644
index 0000000000000000000000000000000000000000..b754dc7a496950e0285037df60913c9c0fe2d478
--- /dev/null
+++ b/tests/script/unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
@@ -0,0 +1,272 @@
+# Test case describe: dnode1/dnode2 include mnode and vnode roles
+# step 1: start dnode1/dnode2, and added into cluster
+# step 2: create db(repl = 2), table, insert data,
+# step 4: stop dnode1, remove its vnode dir, and copy vnode dir of dnode2 to dnode1
+# step 5: restart dnode1, waiting sync end
+# step 6: stop dnode2, reset query cache, and query
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+#system sh/deploy.sh -n dnode3 -i 3
+#system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
+system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
+#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
+#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
+
+system sh/cfg.sh -n dnode1 -c walLevel -v 2
+system sh/cfg.sh -n dnode2 -c walLevel -v 2
+#system sh/cfg.sh -n dnode3 -c walLevel -v 2
+#system sh/cfg.sh -n dnode4 -c walLevel -v 2
+
+system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
+system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
+#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
+
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
+#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
+
+system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
+system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
+#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
+#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
+
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
+system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
+#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
+
+system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
+system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
+#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
+
+print ============== step0: start tarbitrator
+system sh/exec_tarbitrator.sh -s start
+
+print ============== step1: start dnode1/dnode2 and add into cluster
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 1000
+sql connect
+sleep 1000
+sql create dnode $hostname2
+sleep 1000
+
+print ============== step2: create database with replica 2, and create table, insert data
+$totalTableNum = 10
+$sleepTimer = 3000
+
+$db = db
+sql create database $db replica 2 cache 1
+sql use $db
+
+# create table , insert data
+$stb = stb
+sql create table $stb (ts timestamp, c1 double) tags(t1 int)
+$rowNum = 1200
+$tblNum = $totalTableNum
+$totalRows = 0
+$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
+
+$i = 0
+while $i < $tblNum
+ $tb = tb . $i
+ sql create table $tb using $stb tags( $i )
+
+ $x = 0
+ while $x < $rowNum
+ $ts = $tsStart + $x
+ sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
+ $x = $x + 60
+ endw
+ $totalRows = $totalRows + $x
+ print info: inserted $x rows into $tb and totalRows: $totalRows
+ $i = $i + 1
+endw
+
+sql select count(*) from $stb
+print rows:$rows data00:$data00 totalRows:$totalRows
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != $totalRows then
+ return -1
+endi
+
+
+print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
+sql insert into $tb values ( now - 20d , -20 )
+sql insert into $tb values ( now - 40d , -40 )
+$totalRows = $totalRows + 2
+
+print ============== step4: stop dnode1
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode1_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != offline then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_offline
+endi
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
+
+#sql show vgroups
+#print show vgroups:
+#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+
+print ============== step5: remove the mnode dir of dnode1, then copy the monde dir of dnode2
+system_content rm -rf ../../../sim/dnode1/data/vnode
+system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
+
+print ============== step6: restart dnode1, waiting sync end
+system sh/exec.sh -n dnode1 -s start
+sleep 1000
+
+$loopCnt = 0
+wait_dnode1_ready:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 20 then
+ return -1
+endi
+
+sql show dnodes -x wait_dnode1_ready
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+if $dnode2Status != ready then
+ sleep 2000
+ goto wait_dnode1_ready
+endi
+
+$loopCnt = 0
+wait_dnode1_vgroup_slave:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show vgroups
+if $rows != 3 then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+print show vgroups:
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
+print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
+print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
+$d2v2status = $data4_4
+$d2v3status = $data4_2
+$d2v4status = $data4_3
+
+$d1v2status = $data7_4
+$d1v3status = $data7_2
+$d1v4status = $data7_3
+
+if $d2v2status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v3status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d2v4status != master then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+if $d1v2status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v3status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+if $d1v4status != slave then
+ sleep 2000
+ goto wait_dnode1_vgroup_slave
+endi
+
+print ============== step7: stop dnode2
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+
+$loopCnt = 0
+wait_dnode2_offline:
+$loopCnt = $loopCnt + 1
+if $loopCnt == 10 then
+ return -1
+endi
+
+sql show dnodes
+if $rows != 2 then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
+print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
+$dnode1Status = $data4_1
+$dnode2Status = $data4_2
+
+if $dnode1Status != ready then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+if $dnode2Status != offline then
+ sleep 2000
+ goto wait_dnode2_offline
+endi
+
+sql reset query cache
+
+# check using select
+sql select count(*) from $stb
+print data00 $data00
+if $data00 != $totalRows then
+ return -1
+endi
\ No newline at end of file
diff --git a/tests/stress/.gitignore b/tests/stress/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..25a8031c43c318e14e42873dcceca728aa5a48e1
--- /dev/null
+++ b/tests/stress/.gitignore
@@ -0,0 +1,3 @@
+stress
+stress.exe
+cases.json
\ No newline at end of file
diff --git a/tests/stress/README.md b/tests/stress/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7f8a2dac6a8500bbbdb177ecbce89ea2b7f97a6
--- /dev/null
+++ b/tests/stress/README.md
@@ -0,0 +1,80 @@
+# STRESS
+
+Stress test tool for TDengine. It runs a set of test cases randomly and shows statistics.
+
+## COMMAND LINE
+
+``` bash
+$ ./stress [-h=] [-P=<0>] [-d=] [-u=] [-p=] [-c=<4>] [-f=] [-l=] [path_or_sql]
+```
+
+* **-h**: host name or IP address of TDengine server (default: localhost).
+* **-P**: port number of TDengine server (default: 0).
+* **-u**: user name (default: root).
+* **-p**: password (default: taosdata).
+* **-c**: concurrency, number of concurrent goroutines for query (default: 4).
+* **-f**: fetch data or not (default: true).
+* **-l**: log file path (default: no log).
+* **path_or_sql**: a SQL statement or path of a JSON file which contains the test cases (default: cases.json).
+
+## TEST CASE FILE
+
+```json
+[{
+ "weight": 1,
+ "sql": "select * from meters where ts>=now+%dm and ts<=now-%dm and c1=%v and c2=%d and c3='%s' and tbname='%s'",
+ "args": [{
+ "type": "range",
+ "min": 30,
+ "max": 60
+ }, {
+ "type": "bool"
+ }, {
+ "type": "int",
+ "min": -10,
+ "max": 20
+ }, {
+ "type": "string",
+ "min": 0,
+ "max": 10,
+ }, {
+ "type": "list",
+ "list": [
+ "table1",
+ "table2",
+ "table3",
+ "table4"
+ ]
+ }]
+}]
+```
+
+The test case file is a standard JSON file which contains an array of test cases. For test cases, field `sql` is mandatory, and it can optionally include a `weight` field and an `args` field which is an array of arguments.
+
+`sql` is a SQL statement, it can include zero or more arguments (placeholders).
+
+`weight` defines the probability of the case being selected — the greater the value, the higher the probability. It must be a non-negative integer and the default value is zero; however, if all cases have a weight of zero, all weights are regarded as 1.
+
+Placeholders of `sql` are replaced by arguments in `args` at runtime. There are 5 types of arguments currently:
+
+* **bool**: generate a `boolean` value randomly.
+* **int**: generate an `integer` between [`min`, `max`] randomly, the default value of `min` is 0 and `max` is 100.
+* **range**: generate two `integer`s between [`min`, `max`] randomly, the first not greater than the second; the default value of `min` is 0 and `max` is 100.
+* **string**: generate a `string` with length between [`min`, `max`] randomly, the default value of `min` is 0 and `max` is 100.
+* **list**: select an item from `list` randomly.
+
+## OUTPUT
+
+```
+ 00:00:08 | TOTAL REQ | TOTAL TIME(us) | TOTAL AVG(us) | REQUEST | TIME(us) | AVERAGE(us) |
+ TOTAL | 3027 | 26183890 | 8650.11 | 287 | 3060935 | 10665.28 |
+ SUCCESS | 3027 | 26183890 | 8650.11 | 287 | 3060935 | 10665.28 |
+ FAIL | 0 | 0 | 0.00 | 0 | 0 | 0.00 |
+```
+
+* **Col 2**: total number of requests since test start.
+* **Col 3**: total time of all requests since test start.
+* **Col 4**: average time per request since test start.
+* **Col 5**: number of requests in the last second.
+* **Col 6**: total time of all requests in the last second.
+* **Col 7**: average time per request in the last second.
diff --git a/tests/stress/go.mod b/tests/stress/go.mod
new file mode 100644
index 0000000000000000000000000000000000000000..df9b2806b551429ee59ad0d44e3eb0ad75ab39cb
--- /dev/null
+++ b/tests/stress/go.mod
@@ -0,0 +1,7 @@
+module github.com/taosdata/stress
+
+go 1.14
+
+require (
+ github.com/taosdata/driver-go v0.0.0-20200606095205-b786bac1857f
+)
diff --git a/tests/stress/main.go b/tests/stress/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3b9290a379b0782f1042909cc0cadb728a4620e
--- /dev/null
+++ b/tests/stress/main.go
@@ -0,0 +1,406 @@
+package main
+
+import (
+ "database/sql"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "math/rand"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ _ "github.com/taosdata/driver-go/taosSql"
+)
+
+type argument struct {
+ Type string `json:"type"`
+ Min int `json:"min"`
+ Max int `json:"max"`
+ List []interface{} `json:"list, omitempty"`
+}
+
+type testCase struct {
+ isQuery bool `json:"-"`
+ numArgs int `json:"-"`
+ Weight int `json:"weight"`
+ SQL string `json:"sql"`
+ Args []argument `json:"args"`
+}
+
+func (arg *argument) check() (int, error) {
+ if arg.Type == "list" {
+ if len(arg.List) == 0 {
+ return 0, errors.New("list cannot be empty")
+ }
+ return 1, nil
+ }
+
+ if arg.Max < arg.Min {
+ return 0, errors.New("invalid min/max value")
+ }
+
+ if arg.Type == "string" {
+ if arg.Min < 0 {
+ return 0, errors.New("negative string length")
+ }
+ }
+
+ if arg.Type == "int" && arg.Min == 0 && arg.Max == 0 {
+ arg.Max = arg.Min + 100
+ }
+
+ if arg.Type == "range" {
+ return 2, nil
+ }
+
+ return 1, nil
+}
+
+func (arg *argument) generate(args []interface{}) []interface{} {
+ const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+ switch arg.Type {
+ case "bool":
+ if rand.Intn(2) == 1 {
+ args = append(args, true)
+ } else {
+ args = append(args, false)
+ }
+
+ case "int":
+ v := rand.Intn(arg.Max-arg.Min+1) + arg.Min
+ args = append(args, v)
+
+ case "range":
+ v := rand.Intn(arg.Max-arg.Min) + arg.Min
+ args = append(args, v)
+ v = rand.Intn(arg.Max-v+1) + v
+ args = append(args, v)
+
+ case "string":
+ l := rand.Intn(arg.Max-arg.Min+1) + arg.Min
+ sb := strings.Builder{}
+ for i := 0; i < l; i++ {
+ sb.WriteByte(chars[rand.Intn(len(chars))])
+ }
+ args = append(args, sb.String())
+
+ case "list":
+ v := arg.List[rand.Intn(len(arg.List))]
+ args = append(args, v)
+ }
+
+ return args
+}
+
+func (tc *testCase) buildSql() string {
+ args := make([]interface{}, 0, tc.numArgs)
+ for i := 0; i < len(tc.Args); i++ {
+ args = tc.Args[i].generate(args)
+ }
+ return fmt.Sprintf(tc.SQL, args...)
+}
+
+type statitics struct {
+ succeeded int64
+ failed int64
+ succeededDuration int64
+ failedDuration int64
+}
+
+var (
+ host string
+ port uint
+ database string
+ user string
+ password string
+ fetch bool
+
+ chLog chan string
+ wgLog sync.WaitGroup
+ startAt time.Time
+ shouldStop int64
+ wgTest sync.WaitGroup
+ stat statitics
+ totalWeight int
+ cases []testCase
+)
+
+func loadTestCaseFromFile(file *os.File) error {
+ if e := json.NewDecoder(file).Decode(&cases); e != nil {
+ return e
+ }
+
+ if len(cases) == 0 {
+ return fmt.Errorf("no test case loaded.")
+ }
+
+ for i := 0; i < len(cases); i++ {
+ c := &cases[i]
+ c.SQL = strings.TrimSpace(c.SQL)
+ c.isQuery = strings.ToLower(c.SQL[:6]) == "select"
+ if c.Weight < 0 {
+ return fmt.Errorf("test %d: negative weight", i)
+ }
+ totalWeight += c.Weight
+
+ for j := 0; j < len(c.Args); j++ {
+ arg := &c.Args[j]
+ arg.Type = strings.ToLower(arg.Type)
+ n, e := arg.check()
+ if e != nil {
+ return fmt.Errorf("test case %d argument %d: %s", i, j, e.Error())
+ }
+ c.numArgs += n
+ }
+ }
+
+ if totalWeight == 0 {
+ for i := 0; i < len(cases); i++ {
+ cases[i].Weight = 1
+ }
+ totalWeight = len(cases)
+ }
+
+ return nil
+}
+
+func loadTestCase(pathOrSQL string) error {
+ if f, e := os.Open(pathOrSQL); e == nil {
+ defer f.Close()
+ return loadTestCaseFromFile(f)
+ }
+
+ pathOrSQL = strings.TrimSpace(pathOrSQL)
+ if strings.ToLower(pathOrSQL[:6]) != "select" {
+ return fmt.Errorf("'%s' is not a valid file or SQL statement", pathOrSQL)
+ }
+
+ cases = append(cases, testCase{
+ isQuery: true,
+ Weight: 1,
+ numArgs: 0,
+ SQL: pathOrSQL,
+ })
+ totalWeight = 1
+
+ return nil
+}
+
+func selectTestCase() *testCase {
+ sum, target := 0, rand.Intn(totalWeight)
+ var c *testCase
+ for i := 0; i < len(cases); i++ {
+ c = &cases[i]
+ sum += c.Weight
+ if sum > target {
+ break
+ }
+ }
+ return c
+}
+
+func runTest() {
+ defer wgTest.Done()
+ db, e := sql.Open("taosSql", fmt.Sprintf("%s:%s@tcp(%s:%v)/%s", user, password, host, port, database))
+ if e != nil {
+ fmt.Printf("failed to connect to database: %s\n", e.Error())
+ return
+ }
+ defer db.Close()
+
+ for atomic.LoadInt64(&shouldStop) == 0 {
+ c := selectTestCase()
+ str := c.buildSql()
+
+ start := time.Now()
+ if c.isQuery {
+ var rows *sql.Rows
+ if rows, e = db.Query(str); rows != nil {
+ if fetch {
+ for rows.Next() {
+ }
+ }
+ rows.Close()
+ }
+ } else {
+ _, e = db.Exec(str)
+ }
+ duration := time.Now().Sub(start).Microseconds()
+
+ if e != nil {
+ if chLog != nil {
+ chLog <- str + ": " + e.Error()
+ }
+ atomic.AddInt64(&stat.failed, 1)
+ atomic.AddInt64(&stat.failedDuration, duration)
+ } else {
+ atomic.AddInt64(&stat.succeeded, 1)
+ atomic.AddInt64(&stat.succeededDuration, duration)
+ }
+ }
+}
+
+func getStatPrinter() func(tm time.Time) {
+ var last statitics
+ lastPrintAt := startAt
+
+ return func(tm time.Time) {
+ var current statitics
+
+ current.succeeded = atomic.LoadInt64(&stat.succeeded)
+ current.failed = atomic.LoadInt64(&stat.failed)
+ current.succeededDuration = atomic.LoadInt64(&stat.succeededDuration)
+ current.failedDuration = atomic.LoadInt64(&stat.failedDuration)
+
+ seconds := int64(tm.Sub(startAt).Seconds())
+ format := "\033[47;30m %02v:%02v:%02v | TOTAL REQ | TOTAL TIME(us) | TOTAL AVG(us) | REQUEST | TIME(us) | AVERAGE(us) |\033[0m\n"
+ fmt.Printf(format, seconds/3600, seconds%3600/60, seconds%60)
+
+ tr := current.succeeded + current.failed
+ td := current.succeededDuration + current.failedDuration
+ r := tr - last.succeeded - last.failed
+ d := td - last.succeededDuration - last.failedDuration
+ ta, a := 0.0, 0.0
+ if tr > 0 {
+ ta = float64(td) / float64(tr)
+ }
+ if r > 0 {
+ a = float64(d) / float64(r)
+ }
+ format = " TOTAL | %9v | %14v | %13.2f | %7v | %10v | % 13.2f |\n"
+ fmt.Printf(format, tr, td, ta, r, d, a)
+
+ tr = current.succeeded
+ td = current.succeededDuration
+ r = tr - last.succeeded
+ d = td - last.succeededDuration
+ ta, a = 0.0, 0.0
+ if tr > 0 {
+ ta = float64(td) / float64(tr)
+ }
+ if r > 0 {
+ a = float64(d) / float64(r)
+ }
+ format = " SUCCESS | \033[32m%9v\033[0m | \033[32m%14v\033[0m | \033[32m%13.2f\033[0m | \033[32m%7v\033[0m | \033[32m%10v\033[0m | \033[32m%13.2f\033[0m |\n"
+ fmt.Printf(format, tr, td, ta, r, d, a)
+
+ tr = current.failed
+ td = current.failedDuration
+ r = tr - last.failed
+ d = td - last.failedDuration
+ ta, a = 0.0, 0.0
+ if tr > 0 {
+ ta = float64(td) / float64(tr)
+ }
+ if r > 0 {
+ a = float64(d) / float64(r)
+ }
+ format = " FAIL | \033[31m%9v\033[0m | \033[31m%14v\033[0m | \033[31m%13.2f\033[0m | \033[31m%7v\033[0m | \033[31m%10v\033[0m | \033[31m%13.2f\033[0m |\n"
+ fmt.Printf(format, tr, td, ta, r, d, a)
+
+ last = current
+ lastPrintAt = tm
+ }
+}
+
+func startLogger(path string) error {
+ if len(path) == 0 {
+ return nil
+ }
+
+ f, e := os.Create(path)
+ if e != nil {
+ return e
+ }
+
+ chLog = make(chan string, 100)
+ wgLog.Add(1)
+ go func() {
+ for s := range chLog {
+ if f != nil {
+ f.WriteString(s)
+ f.WriteString("\n")
+ }
+ }
+ f.Close()
+ wgLog.Done()
+ }()
+
+ return nil
+}
+
+func main() {
+ var concurrency uint
+ var logPath string
+ flag.StringVar(&host, "h", "localhost", "host name or IP address of TDengine server")
+ flag.UintVar(&port, "P", 0, "port (default 0)")
+ flag.StringVar(&database, "d", "test", "database name")
+ flag.StringVar(&user, "u", "root", "user name")
+ flag.StringVar(&password, "p", "taosdata", "password")
+ flag.BoolVar(&fetch, "f", true, "fetch result or not")
+ flag.UintVar(&concurrency, "c", 4, "concurrency, number of goroutines for query")
+ flag.StringVar(&logPath, "l", "", "path of log file (default: no log)")
+ flag.Parse()
+
+ if e := startLogger(logPath); e != nil {
+ fmt.Println("failed to open log file:", e.Error())
+ return
+ }
+
+ pathOrSQL := flag.Arg(0)
+ if len(pathOrSQL) == 0 {
+ pathOrSQL = "cases.json"
+ }
+ if e := loadTestCase(pathOrSQL); e != nil {
+ fmt.Println("failed to load test cases:", e.Error())
+ return
+ }
+
+ rand.Seed(time.Now().UnixNano())
+
+ fmt.Printf("\nSERVER: %s DATABASE: %s CONCURRENCY: %d FETCH DATA: %v\n\n", host, database, concurrency, fetch)
+
+ startAt = time.Now()
+ printStat := getStatPrinter()
+ printStat(startAt)
+
+ for i := uint(0); i < concurrency; i++ {
+ wgTest.Add(1)
+ go runTest()
+ }
+
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt)
+ ticker := time.NewTicker(time.Second)
+
+ fmt.Println("Ctrl + C to exit....\033[1A")
+
+LOOP:
+ for {
+ select {
+ case <-interrupt:
+ break LOOP
+ case tm := <-ticker.C:
+ fmt.Print("\033[4A")
+ printStat(tm)
+ }
+ }
+
+ atomic.StoreInt64(&shouldStop, 1)
+ fmt.Print("\033[100D'Ctrl + C' received, Waiting started query to stop...")
+ wgTest.Wait()
+
+ if chLog != nil {
+ close(chLog)
+ wgLog.Wait()
+ }
+ fmt.Print("\033[4A\033[100D")
+ printStat(time.Now())
+ fmt.Println()
+}