Commit 1ab84e2c authored by: Tao Liu

Merge branch 'develop' of https://github.com/taosdata/TDengine into develop

......@@ -63,19 +63,21 @@ typedef struct SLocalReducer {
// char * pBufForInterpo; // intermediate buffer for interpolation
tFilePage * pTempBuffer;
struct SQLFunctionCtx *pCtx;
int32_t rowSize; // size of each intermediate result.
int32_t status; // denote it is in reduce process, in reduce process, it
bool hasPrevRow; // cannot be released
int32_t rowSize; // size of each intermediate result.
int32_t finalRowSize; // final result row size
int32_t status; // denote it is in reduce process, in reduce process, it
bool hasPrevRow; // cannot be released
bool hasUnprocessedRow;
tOrderDescriptor * pDesc;
SColumnModel * resColModel;
tExtMemBuffer ** pExtMemBuffer; // disk-based buffer
SFillInfo* pFillInfo; // interpolation support structure
SFillInfo* pFillInfo; // interpolation support structure
char * pFinalRes; // result data after interpo
tFilePage * discardData;
SResultInfo * pResInfo;
bool discard;
int32_t offset; // limit offset value
bool orderPrjOnSTable; // projection query on stable
} SLocalReducer;
typedef struct SSubqueryState {
......
......@@ -341,16 +341,6 @@ bool stableQueryFunctChanged(int32_t funcId) {
*/
void resetResultInfo(SResultInfo *pResInfo) { pResInfo->initialized = false; }
void initResultInfo(SResultInfo *pResInfo) {
pResInfo->initialized = true; // set the initialized flag of this struct
pResInfo->complete = false;
pResInfo->hasResult = false;
pResInfo->numOfRes = 0;
memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen);
}
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable) {
assert(pResInfo->interResultBuf == NULL);
......@@ -387,9 +377,7 @@ static bool function_setup(SQLFunctionCtx *pCtx) {
*/
static void function_finalizer(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
if (pResInfo->hasResult != DATA_SET_FLAG) {
tscTrace("no result generated, result is set to NULL");
if (pCtx->outputType == TSDB_DATA_TYPE_BINARY || pCtx->outputType == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pCtx->aOutputBuf, pCtx->outputType);
} else {
......
......@@ -48,7 +48,7 @@ static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
int32_t radix = 10;
int32_t radixList[3] = {16, 8, 2};
int32_t radixList[3] = {16, 8, 2}; // the integer number with different radix: hex, oct, bin
if (pToken->type == TK_HEX || pToken->type == TK_OCT || pToken->type == TK_BIN) {
radix = radixList[pToken->type - TK_HEX];
}
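
A minimal, standalone illustration of the lookup above; the enum values here are stand-ins, and the only property the indexing relies on is that the hex, oct and bin token codes are consecutive.

#include <assert.h>

enum { EX_TK_HEX = 100, EX_TK_OCT, EX_TK_BIN };   /* stand-in token codes, consecutive by construction */

int main(void) {
  int radixList[3] = {16, 8, 2};                  /* hex, oct, bin */
  assert(radixList[EX_TK_HEX - EX_TK_HEX] == 16); /* hex token selects radix 16 */
  assert(radixList[EX_TK_OCT - EX_TK_HEX] == 8);  /* oct token selects radix 8  */
  assert(radixList[EX_TK_BIN - EX_TK_HEX] == 2);  /* bin token selects radix 2  */
  return 0;
}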
......
......@@ -494,7 +494,6 @@ TAOS_STMT* taos_stmt_init(TAOS* taos) {
tsem_init(&pSql->rspSem, 0, 0);
pSql->signature = pSql;
pSql->pTscObj = pObj;
//pSql->pTscObj->pSql = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA_NUM;
pStmt->pSql = pSql;
......@@ -515,7 +514,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
//doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
pSql->param = (void*)pSql;
pSql->param = (void*) pSql;
pSql->fp = waitForQueryRsp;
pSql->insertType = TSDB_QUERY_TYPE_STMT_INSERT;
......
......@@ -217,7 +217,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->numOfBuffer = numOfFlush;
pReducer->numOfVnode = numOfBuffer;
pReducer->pDesc = pDesc;
tscTrace("%p the number of merged leaves is: %d", pSql, pReducer->numOfBuffer);
......@@ -278,6 +278,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
param->groupOrderType = pQueryInfo->groupbyExpr.orderType;
pReducer->orderPrjOnSTable = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
pRes->code = tLoserTreeCreate(&pReducer->pLoserTree, pReducer->numOfBuffer, param, treeComparator);
if (pReducer->pLoserTree == NULL || pRes->code != 0) {
......@@ -309,10 +310,10 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
pReducer->nResultBufSize = pMemBuffer[0]->pageSize * 16;
pReducer->pResultBuf = (tFilePage *)calloc(1, pReducer->nResultBufSize + sizeof(tFilePage));
int32_t finalRowLength = tscGetResRowLength(pQueryInfo->exprList);
pReducer->finalRowSize = tscGetResRowLength(pQueryInfo->exprList);
pReducer->resColModel = finalmodel;
pReducer->resColModel->capacity = pReducer->nResultBufSize / finalRowLength;
assert(finalRowLength <= pReducer->rowSize);
pReducer->resColModel->capacity = pReducer->nResultBufSize / pReducer->finalRowSize;
assert(pReducer->finalRowSize <= pReducer->rowSize);
pReducer->pFinalRes = calloc(1, pReducer->rowSize * pReducer->resColModel->capacity);
// pReducer->pBufForInterpo = calloc(1, pReducer->nResultBufSize);
......@@ -389,7 +390,7 @@ static int32_t tscFlushTmpBufferImpl(tExtMemBuffer *pMemoryBuf, tOrderDescriptor
assert(pPage->num <= pDesc->pColumnModel->capacity);
// sort before flush to disk, the data must be consecutively put on tFilePage.
if (pDesc->orderIdx.numOfCols > 0) {
if (pDesc->orderInfo.numOfCols > 0) {
tColDataQSort(pDesc, pPage->num, 0, pPage->num - 1, pPage->data, orderType);
}
......@@ -590,12 +591,10 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage *tmpBuffer) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
// disable merge procedure for column projection query
int16_t functionId = pReducer->pCtx[0].functionId;
assert(functionId != TSDB_FUNC_ARITHM);
if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pReducer->orderPrjOnSTable) {
return true;
}
......@@ -604,26 +603,33 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
}
tOrderDescriptor *pOrderDesc = pReducer->pDesc;
int32_t numOfCols = pOrderDesc->orderIdx.numOfCols;
SColumnOrderInfo* orderInfo = &pOrderDesc->orderInfo;
// no group by columns, all data belongs to one group
int32_t numOfCols = orderInfo->numOfCols;
if (numOfCols <= 0) {
return true;
}
if (pOrderDesc->orderIdx.pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) { //<= 0
// super table interval query
if (orderInfo->pData[numOfCols - 1] == PRIMARYKEY_TIMESTAMP_COL_INDEX) {
/*
* super table interval query
* if the order column is the primary timestamp, all result data belongs to one group
*/
assert(pQueryInfo->intervalTime > 0);
pOrderDesc->orderIdx.numOfCols -= 1;
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
assert(pQueryInfo->intervalTime == 0);
}
// only one row exists
int32_t ret = compare_a(pOrderDesc, 1, 0, pPrev, 1, 0, tmpBuffer->data);
pOrderDesc->orderIdx.numOfCols = numOfCols;
return (ret == 0);
int32_t index = orderInfo->pData[0];
int32_t offset = (pOrderDesc->pColumnModel)->pFields[index].offset;
int32_t ret = memcmp(pPrev + offset, tmpBuffer->data + offset, pOrderDesc->pColumnModel->rowSize - offset);
return ret == 0;
}
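
A minimal standalone sketch (not part of this commit) of the comparison rule used above, assuming the group-by columns occupy a contiguous tail of each materialized row starting at the first order column's offset:

#include <stdbool.h>
#include <string.h>

/* prevRow and curRow are assumed to be laid out by the same column model, rowSize bytes each */
static bool rowsInSameGroup(const char *prevRow, const char *curRow,
                            int firstOrderColOffset, int rowSize) {
  /* identical bytes from the first group-by column to the end of the row
   * means the two rows carry the same group-by key values */
  return memcmp(prevRow + firstOrderColOffset,
                curRow + firstOrderColOffset,
                (size_t)(rowSize - firstOrderColOffset)) == 0;
}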
int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOrderDescriptor **pOrderDesc,
......@@ -873,24 +879,24 @@ static void reversedCopyFromInterpolationToDstBuf(SQueryInfo *pQueryInfo, SSqlRe
* Note: pRes->pLocalReducer may be null, due to the fact that "tscDestroyLocalReducer" is called
* by "interuptHandler" function in shell
*/
static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
static void doFillResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool doneOutput) {
SSqlCmd * pCmd = &pSql->cmd;
SSqlRes * pRes = &pSql->res;
tFilePage * pFinalDataPage = pLocalReducer->pResultBuf;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
if (pRes->pLocalReducer != pLocalReducer) {
/*
* The release of the SSqlObj has been called, and it is in the destroying function invoked by another thread.
* However, the other thread will WAIT until the current process fully completes,
* since the release flag of the struct is set by the doLocalReduce function.
*/
assert(pRes->pLocalReducer == NULL);
}
// if (pRes->pLocalReducer != pLocalReducer) {
// /*
// * The release of the SSqlObj has been called, and it is in the destroying function invoked by another thread.
// * However, the other thread will WAIT until the current process fully completes,
// * since the release flag of the struct is set by the doLocalReduce function.
// */
// assert(pRes->pLocalReducer == NULL);
// }
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
// no interval query, no fill operation
pRes->data = pLocalReducer->pFinalRes;
pRes->numOfRows = pFinalDataPage->num;
pRes->numOfClauseTotal += pRes->numOfRows;
......@@ -929,9 +935,7 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pLocalReducer->pFillInfo);
}
int32_t rowSize = tscGetResRowLength(pQueryInfo->exprList);
memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * rowSize);
memcpy(pRes->data, pFinalDataPage->data, pRes->numOfRows * pLocalReducer->finalRowSize);
pFinalDataPage->num = 0;
return;
}
......@@ -1037,16 +1041,13 @@ static void savePreviousRow(SLocalReducer *pLocalReducer, tFilePage *tmpBuffer)
static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer, bool needInit) {
// the tag columns need to be set before all functions execution
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t j = 0; j < size; ++j) {
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, j);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[j];
tVariantAssign(&pCtx->param[0], &pExpr->param[0]);
// tags/tags_dummy function, the tag field of SQLFunctionCtx is from the input buffer
int32_t functionId = pExpr->functionId;
int32_t functionId = pCtx->functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS_DUMMY) {
tVariantDestroy(&pCtx->tag);
char* input = pCtx->aInputElemBuf;
......@@ -1057,17 +1058,20 @@ static void doExecuteSecondaryMerge(SSqlCmd *pCmd, SLocalReducer *pLocalReducer,
} else {
tVariantCreateFromBinary(&pCtx->tag, input, pCtx->inputBytes, pCtx->inputType);
}
} else if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
pCtx->param[0].i64Key = pExpr->param[0].i64Key;
}
pCtx->currentStage = SECONDARY_STAGE_MERGE;
if (needInit) {
aAggs[pExpr->functionId].init(pCtx);
aAggs[pCtx->functionId].init(pCtx);
}
}
for (int32_t j = 0; j < size; ++j) {
int32_t functionId = tscSqlExprGet(pQueryInfo, j)->functionId;
int32_t functionId = pLocalReducer->pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
......@@ -1101,8 +1105,7 @@ static int64_t getNumOfResultLocal(SQueryInfo *pQueryInfo, SQLFunctionCtx *pCtx)
* ts, tag, tagprj function can not decide the output number of current query
* the number of output result is decided by main output
*/
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, j);
int32_t functionId = pExpr->functionId;
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ) {
continue;
}
......@@ -1136,15 +1139,13 @@ static void fillMultiRowsOfTagsVal(SQueryInfo *pQueryInfo, int32_t numOfRes, SLo
char *buf = malloc((size_t)maxBufSize);
for (int32_t k = 0; k < size; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
if (pExpr->functionId != TSDB_FUNC_TAG) {
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
if (pCtx->functionId != TSDB_FUNC_TAG) {
continue;
}
int32_t inc = numOfRes - 1; // tsdb_func_tag function only produces one row of result
memset(buf, 0, (size_t)maxBufSize);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
memcpy(buf, pCtx->aOutputBuf, (size_t)pCtx->outputBytes);
for (int32_t i = 0; i < inc; ++i) {
......@@ -1160,8 +1161,8 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, k);
aAggs[pExpr->functionId].xFinalize(&pLocalReducer->pCtx[k]);
SQLFunctionCtx* pCtx = &pLocalReducer->pCtx[k];
aAggs[pCtx->functionId].xFinalize(pCtx);
}
pLocalReducer->hasPrevRow = false;
......@@ -1182,13 +1183,13 @@ int32_t finalizeRes(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer) {
*/
bool needToMerge(SQueryInfo *pQueryInfo, SLocalReducer *pLocalReducer, tFilePage *tmpBuffer) {
int32_t ret = 0; // merge all result by default
int16_t functionId = tscSqlExprGet(pQueryInfo, 0)->functionId;
int16_t functionId = pLocalReducer->pCtx[0].functionId;
if (functionId == TSDB_FUNC_PRJ || functionId == TSDB_FUNC_ARITHM) { // column projection query
ret = 1; // disable merge procedure
} else {
tOrderDescriptor *pDesc = pLocalReducer->pDesc;
if (pDesc->orderIdx.numOfCols > 0) {
if (pDesc->orderInfo.numOfCols > 0) {
if (pDesc->tsOrder == TSDB_ORDER_ASC) { // asc
// todo refactor comparator
ret = compare_a(pLocalReducer->pDesc, 1, 0, pLocalReducer->prevRowOfInput, 1, 0, tmpBuffer->data);
......@@ -1274,7 +1275,7 @@ bool doGenerateFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool no
taosFillCopyInputDataFromOneFilePage(pFillInfo, pResBuf);
}
doInterpolateResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
doFillResult(pSql, pLocalReducer, noMoreCurrentGroupRes);
return true;
}
......@@ -1341,7 +1342,7 @@ static bool doBuildFilledResultForGroup(SSqlObj *pSql) {
// the first column must be the timestamp column
int32_t rows = taosGetNumOfResultWithFill(pFillInfo, remain, ekey, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do interpo
doInterpolateResult(pSql, pLocalReducer, false);
doFillResult(pSql, pLocalReducer, false);
}
return true;
......@@ -1374,7 +1375,7 @@ static bool doHandleLastRemainData(SSqlObj *pSql) {
pQueryInfo->slidingTimeUnit, tinfo.precision);
int32_t rows = taosGetNumOfResultWithFill(pFillInfo, 0, etime, pLocalReducer->resColModel->capacity);
if (rows > 0) { // do interpo
doInterpolateResult(pSql, pLocalReducer, true);
doFillResult(pSql, pLocalReducer, true);
}
}
......@@ -1408,13 +1409,11 @@ static void doProcessResultInNextWindow(SSqlObj *pSql, int32_t numOfRes) {
size_t size = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t k = 0; k < size; ++k) {
SSqlExpr * pExpr = tscSqlExprGet(pQueryInfo, k);
SQLFunctionCtx *pCtx = &pLocalReducer->pCtx[k];
pCtx->aOutputBuf += pCtx->outputBytes * numOfRes;
// set the correct output timestamp column position
if (pExpr->functionId == TSDB_FUNC_TOP || pExpr->functionId == TSDB_FUNC_BOTTOM) {
if (pCtx->functionId == TSDB_FUNC_TOP || pCtx->functionId == TSDB_FUNC_BOTTOM) {
pCtx->ptsOutputBuf = ((char *)pCtx->ptsOutputBuf + TSDB_KEYSIZE * numOfRes);
}
}
......
......@@ -334,7 +334,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
if (rpcMsg->code != TSDB_CODE_ACTION_IN_PROGRESS) {
rpcMsg->code = pRes->code ? pRes->code : pRes->numOfRows;
rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? pRes->numOfRows: pRes->code;
tscTrace("%p SQL result:%s res:%p", pSql, tstrerror(pRes->code), pSql);
bool shouldFree = tscShouldBeFreed(pSql);
......@@ -2312,7 +2312,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
}
pRes->row = 0;
tscTrace("%p numOfRows:%d, offset:%d", pSql, pRes->numOfRows, pRes->offset);
tscTrace("%p numOfRows:%d, offset:%d, complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed);
return 0;
}
......
......@@ -1557,8 +1557,8 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(pRes->numOfRows == numOfRows);
int64_t num = atomic_add_fetch_64(&pState->numOfRetrievedRows, numOfRows);
// tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%u,vid:%d,orderOfSub:%d", pPObj, pSql,
// pRes->numOfRows, pState->numOfRetrievedRows, pSvd->ip, pSvd->vnode, idx);
tscTrace("%p sub:%p retrieve numOfRows:%d totalNumOfRows:%d from ip:%s, orderOfSub:%d", pPObj, pSql,
pRes->numOfRows, pState->numOfRetrievedRows, pSql->ipList.fqdn[pSql->ipList.inUse], idx);
if (num > tsMaxNumOfOrderedResults && tscIsProjectionQueryOnSTable(pQueryInfo, 0)) {
tscError("%p sub:%p num of OrderedRes is too many, max allowed:%" PRId64 " , current:%" PRId64,
......@@ -1713,6 +1713,11 @@ static void multiVnodeInsertMerge(void* param, TAOS_RES* tres, int numOfRows) {
// increase the total inserted rows
if (numOfRows > 0) {
pParentObj->res.numOfRows += numOfRows;
} else {
SSqlObj* pSql = (SSqlObj*) tres;
assert(pSql != NULL && pSql->res.code == numOfRows);
pParentObj->res.code = pSql->res.code;
}
taos_free_result(tres);
......@@ -1947,7 +1952,8 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
for (int i = 0; i < tscNumOfFields(pQueryInfo); ++i) {
size_t size = tscNumOfFields(pQueryInfo);
for (int i = 0; i < size; ++i) {
SFieldSupInfo* pSup = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, i);
if (pSup->pSqlExpr != NULL) {
tscGetResultColumnChr(pRes, &pQueryInfo->fieldsInfo, i);
......
......@@ -2104,7 +2104,7 @@ void tscTryQueryNextClause(SSqlObj* pSql, void (*queryFp)()) {
}
void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pFieldInfo, int32_t columnIndex) {
SFieldSupInfo* pInfo = tscFieldInfoGetSupp(pFieldInfo, columnIndex);
SFieldSupInfo* pInfo = taosArrayGet(pFieldInfo->pSupportInfo, columnIndex);//tscFieldInfoGetSupp(pFieldInfo, columnIndex);
assert(pInfo->pSqlExpr != NULL);
int32_t type = pInfo->pSqlExpr->resType;
......
......@@ -188,9 +188,10 @@ typedef void *TsdbPosT;
* @param tsdb tsdb handle
* @param pCond query condition, including time window, result set order, and basic required columns for each block
* @param groupInfo tableId list in the form of set, separated into different groups according to group by condition
* @param qinfo query info handle from query processor
* @return
*/
TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo, void* qinfo);
/**
* Get the last row of the given query time window for all the tables in STableGroupInfo object.
......@@ -202,11 +203,11 @@ TsdbQueryHandleT *tsdbQueryTables(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STable
* @param groupInfo tableId list.
* @return
*/
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo);
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupInfo, void* qinfo);
SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo);
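
A hedged caller sketch (the surrounding variables and the header name are assumptions): the point of this signature change is only that the same opaque qinfo pointer is now threaded through all three read entry points, so tsdb-side trace logs can name the owning query.

#include "tsdb.h"   /* assumed header exposing the declarations above */

static void openReadersWithTrace(TsdbRepoT *tsdb, STsdbQueryCond *pCond,
                                 STableGroupInfo *pGroup, void *pQInfo) {
  TsdbQueryHandleT *pTables = tsdbQueryTables(tsdb, pCond, pGroup, pQInfo);
  TsdbQueryHandleT  pLast   = tsdbQueryLastRow(tsdb, pCond, pGroup, pQInfo);
  TsdbQueryHandleT  pExtWin = tsdbQueryRowsInExternalWindow(tsdb, pCond, pGroup, pQInfo);
  (void)pTables; (void)pLast; (void)pExtWin;   /* handles would be consumed and closed by the query layer */
}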
/**
* move to next block if exists
......
......@@ -172,10 +172,11 @@ typedef struct SQueryRuntimeEnv {
STSBuf* pTSBuf;
STSCursor cur;
SQueryCostInfo summary;
bool stableQuery; // super table query or not
bool stableQuery; // super table query or not
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
bool topBotQuery; // false;
} SQueryRuntimeEnv;
typedef struct SQInfo {
......
......@@ -31,7 +31,8 @@ void closeTimeWindow(SWindowResInfo* pWindowResInfo, int32_t slot);
void closeAllTimeWindow(SWindowResInfo* pWindowResInfo);
void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order);
SWindowResult *getWindowResult(SWindowResInfo *pWindowResInfo, int32_t slot);
int32_t curTimeWindow(SWindowResInfo *pWindowResInfo);
#define curTimeWindow(_winres) ((_winres)->curIndex)
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
void createQueryResultInfo(SQuery *pQuery, SWindowResult *pResultRow, bool isSTableQuery, SPosInfo *posInfo);
......
......@@ -28,7 +28,7 @@ extern "C" {
#include "tdataformat.h"
#include "talgo.h"
#define DEFAULT_PAGE_SIZE (1024L*56) // 16k larger than the SHistoInfo
#define DEFAULT_PAGE_SIZE (1024L*64) // 16k larger than the SHistoInfo
#define MAX_TMPFILE_PATH_LENGTH PATH_MAX
#define INITIAL_ALLOCATION_BUFFER_SIZE 64
......@@ -96,7 +96,7 @@ typedef struct SColumnOrderInfo {
typedef struct tOrderDescriptor {
SColumnModel * pColumnModel;
int32_t tsOrder; // timestamp order type if exists
SColumnOrderInfo orderIdx;
SColumnOrderInfo orderInfo;
} tOrderDescriptor;
typedef struct tExtMemBuffer {
......
......@@ -85,7 +85,7 @@ SIDList getDataBufPagesIdList(SDiskbasedResultBuf* pResultBuf, int32_t groupId);
* @param id
* @return
*/
tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id);
#define GET_RES_BUF_PAGE_BY_ID(buf, id) ((tFilePage*)((buf)->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE*(id)))
/**
* get the total buffer size in the format of disk file
......
......@@ -647,9 +647,9 @@ cmd ::= ALTER TABLE ids(X) cpxName(F) SET TAG ids(Y) EQ tagitem(Z). {
}
////////////////////////////////////////kill statement///////////////////////////////////////
cmd ::= KILL CONNECTION IPTOKEN(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &X);}
cmd ::= KILL STREAM IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
cmd ::= KILL QUERY IPTOKEN(X) COLON(Z) INTEGER(Y) COLON(K) INTEGER(F). {X.n += (Z.n + Y.n + K.n + F.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
cmd ::= KILL CONNECTION INTEGER(Y). {setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &Y);}
cmd ::= KILL STREAM INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &X);}
cmd ::= KILL QUERY INTEGER(X) COLON(Z) INTEGER(Y). {X.n += (Z.n + Y.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &X);}
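
A hedged usage sketch of the revised statements (the ids are made up, and reading the two integers as connection id plus stream/query id is an assumption): the leading IP token is dropped, so the target is addressed by id alone.

#include "taos.h"   /* TDengine client header (assumed available) */

static void killExamples(TAOS *taos) {                        /* taos: an established connection */
  taos_free_result(taos_query(taos, "KILL CONNECTION 3"));    /* kill connection 3               */
  taos_free_result(taos_query(taos, "KILL STREAM 3:1"));      /* kill stream 1 of connection 3   */
  taos_free_result(taos_query(taos, "KILL QUERY 3:12"));      /* kill query 12 of connection 3   */
}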
%fallback ID ABORT AFTER ASC ATTACH BEFORE BEGIN CASCADE CLUSTER CONFLICT COPY DATABASE DEFERRED
DELIMITERS DESC DETACH EACH END EXPLAIN FAIL FOR GLOB IGNORE IMMEDIATE INITIALLY INSTEAD
......
......@@ -272,9 +272,18 @@ bool top_bot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, char *mi
bool stableQueryFunctChanged(int32_t funcId);
void resetResultInfo(SResultInfo *pResInfo);
void initResultInfo(SResultInfo *pResInfo);
void setResultInfoBuf(SResultInfo *pResInfo, int32_t size, bool superTable);
static FORCE_INLINE void initResultInfo(SResultInfo *pResInfo) {
pResInfo->initialized = true; // set the initialized flag of this struct
pResInfo->complete = false;
pResInfo->hasResult = false;
pResInfo->numOfRes = 0;
memset(pResInfo->interResultBuf, 0, (size_t)pResInfo->bufLen);
}
#ifdef __cplusplus
}
#endif
......
This diff has been collapsed.
......@@ -206,11 +206,6 @@ bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot) {
return (getWindowResult(pWindowResInfo, slot)->status.closed == true);
}
int32_t curTimeWindow(SWindowResInfo *pWindowResInfo) {
assert(pWindowResInfo->curIndex >= 0 && pWindowResInfo->curIndex < pWindowResInfo->size);
return pWindowResInfo->curIndex;
}
void closeTimeWindow(SWindowResInfo *pWindowResInfo, int32_t slot) {
getWindowResult(pWindowResInfo, slot)->status.closed = true;
}
......
......@@ -356,17 +356,15 @@ static FORCE_INLINE int32_t primaryKeyComparator(int64_t f1, int64_t f2, int32_t
static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, int32_t type, int32_t bytes) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
int32_t first = *(int32_t *)f1;
int32_t second = *(int32_t *)f2;
int32_t first = *(int32_t *) f1;
int32_t second = *(int32_t *) f2;
if (first == second) {
return 0;
}
return (first < second) ? -1 : 1;
};
case TSDB_DATA_TYPE_DOUBLE: {
//double first = *(double *)f1;
double first = GET_DOUBLE_VAL(f1);
//double second = *(double *)f2;
double first = GET_DOUBLE_VAL(f1);
double second = GET_DOUBLE_VAL(f2);
if (first == second) {
return 0;
......@@ -374,9 +372,7 @@ static FORCE_INLINE int32_t columnValueAscendingComparator(char *f1, char *f2, i
return (first < second) ? -1 : 1;
};
case TSDB_DATA_TYPE_FLOAT: {
//float first = *(float *)f1;
//float second = *(float *)f2;
float first = GET_FLOAT_VAL(f1);
float first = GET_FLOAT_VAL(f1);
float second = GET_FLOAT_VAL(f2);
if (first == second) {
return 0;
......@@ -439,9 +435,9 @@ int32_t compare_a(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t s2, char *data2) {
assert(numOfRows1 == numOfRows2);
int32_t cmpCnt = pDescriptor->orderIdx.numOfCols;
int32_t cmpCnt = pDescriptor->orderInfo.numOfCols;
for (int32_t i = 0; i < cmpCnt; ++i) {
int32_t colIdx = pDescriptor->orderIdx.pData[i];
int32_t colIdx = pDescriptor->orderInfo.pData[i];
char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx);
char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx);
......@@ -471,9 +467,9 @@ int32_t compare_d(tOrderDescriptor *pDescriptor, int32_t numOfRows1, int32_t s1,
int32_t s2, char *data2) {
assert(numOfRows1 == numOfRows2);
int32_t cmpCnt = pDescriptor->orderIdx.numOfCols;
int32_t cmpCnt = pDescriptor->orderInfo.numOfCols;
for (int32_t i = 0; i < cmpCnt; ++i) {
int32_t colIdx = pDescriptor->orderIdx.pData[i];
int32_t colIdx = pDescriptor->orderInfo.pData[i];
char *f1 = COLMODEL_GET_VAL(data1, pDescriptor->pColumnModel, numOfRows1, s1, colIdx);
char *f2 = COLMODEL_GET_VAL(data2, pDescriptor->pColumnModel, numOfRows2, s2, colIdx);
......@@ -563,13 +559,13 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
int32_t midIdx = ((end - start) >> 1) + start;
#if defined(_DEBUG_VIEW)
int32_t f = pDescriptor->orderIdx.pData[0];
int32_t f = pDescriptor->orderInfo.pData[0];
char *midx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, midIdx, f);
char *startx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, start, f);
char *endx = COLMODEL_GET_VAL(data, pDescriptor->pColumnModel, numOfRows, end, f);
int32_t colIdx = pDescriptor->orderIdx.pData[0];
int32_t colIdx = pDescriptor->orderInfo.pData[0];
tSortDataPrint(pDescriptor->pColumnModel->pFields[colIdx].field.type, "before", startx, midx, endx);
#endif
......@@ -596,7 +592,7 @@ static void median(tOrderDescriptor *pDescriptor, int32_t numOfRows, int32_t sta
}
static UNUSED_FUNC void tRowModelDisplay(tOrderDescriptor *pDescriptor, int32_t numOfRows, char *d, int32_t len) {
int32_t colIdx = pDescriptor->orderIdx.pData[0];
int32_t colIdx = pDescriptor->orderInfo.pData[0];
for (int32_t i = 0; i < len; ++i) {
char *startx = COLMODEL_GET_VAL(d, pDescriptor->pColumnModel, numOfRows, i, colIdx);
......@@ -1062,9 +1058,9 @@ tOrderDescriptor *tOrderDesCreate(const int32_t *orderColIdx, int32_t numOfOrder
desc->pColumnModel = pModel;
desc->tsOrder = tsOrderType;
desc->orderIdx.numOfCols = numOfOrderCols;
desc->orderInfo.numOfCols = numOfOrderCols;
for (int32_t i = 0; i < numOfOrderCols; ++i) {
desc->orderIdx.pData[i] = orderColIdx[i];
desc->orderInfo.pData[i] = orderColIdx[i];
}
return desc;
......
......@@ -50,12 +50,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t si
return TSDB_CODE_SUCCESS;
}
tFilePage* getResultBufferPageById(SDiskbasedResultBuf* pResultBuf, int32_t id) {
assert(id < pResultBuf->numOfPages && id >= 0);
return (tFilePage*)(pResultBuf->pBuf + DEFAULT_INTERN_BUF_PAGE_SIZE * id);
}
int32_t getNumOfResultBufGroupId(SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->idsTable); }
int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->totalBufSize; }
......@@ -169,7 +163,7 @@ tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32
*pageId = (pResultBuf->allocateId++);
registerPageId(pResultBuf, groupId, *pageId);
tFilePage* page = getResultBufferPageById(pResultBuf, *pageId);
tFilePage* page = GET_RES_BUF_PAGE_BY_ID(pResultBuf, *pageId);
// clear memory for the new page
memset(page, 0, DEFAULT_INTERN_BUF_PAGE_SIZE);
......
......@@ -127,7 +127,7 @@ void rpcAddConnIntoCache(void *handle, void *data, char *fqdn, uint16_t port, in
hash = rpcHashConn(pCache, fqdn, port, connType);
pNode = (SConnHash *)taosMemPoolMalloc(pCache->connHashMemPool);
strcpy(pNode->fqdn, fqdn);
tstrncpy(pNode->fqdn, fqdn, sizeof(pNode->fqdn));
pNode->port = port;
pNode->connType = connType;
pNode->data = data;
......
......@@ -60,7 +60,7 @@ typedef struct {
void *idPool; // handle to ID pool
void *tmrCtrl; // handle to timer
void *hash; // handle returned by hash utility
SHashObj *hash; // handle returned by hash utility
void *tcphandle;// returned handle from TCP initialization
void *udphandle;// returned handle from UDP initialization
void *pCache; // connection cache
......@@ -211,7 +211,7 @@ void *rpcOpen(const SRpcInit *pInit) {
pRpc = (SRpcInfo *)calloc(1, sizeof(SRpcInfo));
if (pRpc == NULL) return NULL;
if(pInit->label) strcpy(pRpc->label, pInit->label);
if(pInit->label) tstrncpy(pRpc->label, pInit->label, sizeof(pRpc->label));
pRpc->connType = pInit->connType;
pRpc->idleTime = pInit->idleTime;
pRpc->numOfThreads = pInit->numOfThreads>TSDB_MAX_RPC_THREADS ? TSDB_MAX_RPC_THREADS:pInit->numOfThreads;
......@@ -228,7 +228,7 @@ void *rpcOpen(const SRpcInit *pInit) {
size_t size = sizeof(SRpcConn) * pRpc->sessions;
pRpc->connList = (SRpcConn *)calloc(1, size);
if (pRpc->connList == NULL) {
tError("%s failed to allocate memory for taos connections, size:%d", pRpc->label, size);
tError("%s failed to allocate memory for taos connections, size:%ld", pRpc->label, size);
rpcClose(pRpc);
return NULL;
}
......@@ -459,7 +459,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
pInfo->clientPort = pConn->peerPort;
// pInfo->serverIp = pConn->destIp;
strcpy(pInfo->user, pConn->user);
strncpy(pInfo->user, pConn->user, sizeof(pInfo->user));
return 0;
}
......@@ -503,10 +503,10 @@ static SRpcConn *rpcOpenConn(SRpcInfo *pRpc, char *peerFqdn, uint16_t peerPort,
pConn = rpcAllocateClientConn(pRpc);
if (pConn) {
strcpy(pConn->peerFqdn, peerFqdn);
tstrncpy(pConn->peerFqdn, peerFqdn, sizeof(pConn->peerFqdn));
pConn->peerIp = peerIp;
pConn->peerPort = peerPort;
strcpy(pConn->user, pRpc->user);
tstrncpy(pConn->user, pRpc->user, sizeof(pConn->user));
pConn->connType = connType;
if (taosOpenConn[connType]) {
......@@ -804,7 +804,7 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv) {
pConn = rpcGetConnObj(pRpc, sid, pRecv);
if (pConn == NULL) {
tTrace("%s %p, failed to get connection obj(%s)", pRpc->label, pHead->ahandle, tstrerror(terrno));
tTrace("%s %p, failed to get connection obj(%s)", pRpc->label, (void *)pHead->ahandle, tstrerror(terrno));
return NULL;
} else {
if (rpcIsReq(pHead->msgType)) {
......
......@@ -73,7 +73,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pServerObj = (SServerObj *)calloc(sizeof(SServerObj), 1);
pServerObj->ip = ip;
pServerObj->port = port;
strcpy(pServerObj->label, label);
tstrncpy(pServerObj->label, label, sizeof(pServerObj->label));
pServerObj->numOfThreads = numOfThreads;
pServerObj->pThreadObj = (SThreadObj *)calloc(sizeof(SThreadObj), numOfThreads);
......@@ -87,7 +87,7 @@ void *taosInitTcpServer(uint32_t ip, uint16_t port, char *label, int numOfThread
pThreadObj = pServerObj->pThreadObj;
for (int i = 0; i < numOfThreads; ++i) {
pThreadObj->processData = fp;
strcpy(pThreadObj->label, label);
tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->shandle = shandle;
code = pthread_mutex_init(&(pThreadObj->mutex), NULL);
......@@ -247,7 +247,7 @@ void *taosInitTcpClient(uint32_t ip, uint16_t port, char *label, int num, void *
pThreadObj = (SThreadObj *)malloc(sizeof(SThreadObj));
memset(pThreadObj, 0, sizeof(SThreadObj));
strcpy(pThreadObj->label, label);
tstrncpy(pThreadObj->label, label, sizeof(pThreadObj->label));
pThreadObj->ip = ip;
pThreadObj->shandle = shandle;
......
......@@ -72,7 +72,7 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
pSet->port = port;
pSet->shandle = shandle;
pSet->fp = fp;
strcpy(pSet->label, label);
tstrncpy(pSet->label, label, sizeof(pSet->label));
uint16_t ownPort;
for (int i = 0; i < threads; ++i) {
......@@ -99,7 +99,7 @@ void *taosInitUdpConnection(uint32_t ip, uint16_t port, char *label, int threads
pConn->localPort = (uint16_t)ntohs(sin.sin_port);
}
strcpy(pConn->label, label);
tstrncpy(pConn->label, label, sizeof(pConn->label));
pConn->shandle = shandle;
pConn->processData = fp;
pConn->index = i;
......
......@@ -76,6 +76,7 @@ int main(int argc, char *argv[]) {
int numOfReqs = 0;
int appThreads = 1;
char serverIp[40] = "127.0.0.1";
char secret[TSDB_KEY_LEN] = "mypassword";
struct timeval systemTime;
int64_t startTime, endTime;
pthread_attr_t thattr;
......@@ -97,7 +98,7 @@ int main(int argc, char *argv[]) {
rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer*1000;
rpcInit.user = "michael";
rpcInit.secret = "mypassword";
rpcInit.secret = secret;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT;
......@@ -106,7 +107,7 @@ int main(int argc, char *argv[]) {
if (strcmp(argv[i], "-p")==0 && i < argc-1) {
ipSet.port[0] = atoi(argv[++i]);
} else if (strcmp(argv[i], "-i") ==0 && i < argc-1) {
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn));
tstrncpy(ipSet.fqdn[0], argv[++i], sizeof(ipSet.fqdn[0]));
} else if (strcmp(argv[i], "-t")==0 && i < argc-1) {
rpcInit.numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-m")==0 && i < argc-1) {
......
......@@ -77,6 +77,7 @@ int main(int argc, char *argv[]) {
int numOfReqs = 0;
int appThreads = 1;
char serverIp[40] = "127.0.0.1";
char secret[TSDB_KEY_LEN] = "mypassword";
struct timeval systemTime;
int64_t startTime, endTime;
pthread_attr_t thattr;
......@@ -98,7 +99,7 @@ int main(int argc, char *argv[]) {
rpcInit.sessions = 100;
rpcInit.idleTime = tsShellActivityTimer*1000;
rpcInit.user = "michael";
rpcInit.secret = "mypassword";
rpcInit.secret = secret;
rpcInit.ckey = "key";
rpcInit.spi = 1;
rpcInit.connType = TAOS_CONN_CLIENT;
......
......@@ -17,6 +17,7 @@
#include "tulog.h"
#include "talgo.h"
#include "tutil.h"
#include "ttime.h"
#include "tcompare.h"
#include "exception.h"
......@@ -71,6 +72,8 @@ typedef struct STableCheckInfo {
int32_t compSize;
int32_t numOfBlocks; // number of qualified data blocks not the original blocks
SDataCols* pDataCols;
int32_t chosen; // indicate which iterator should move forward
bool initBuf; // whether to initialize the in-memory skip list iterator or not
SSkipListIterator* iter; // mem buffer skip list iterator
SSkipListIterator* iiter; // imem buffer skip list iterator
......@@ -79,8 +82,6 @@ typedef struct STableCheckInfo {
typedef struct STableBlockInfo {
SCompBlock* compBlock;
STableCheckInfo* pTableCheckInfo;
// int32_t blockIndex;
// int32_t groupIdx; /* number of group is less than the total number of tables */
} STableBlockInfo;
typedef struct SBlockOrderSupporter {
......@@ -120,7 +121,7 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* pCheckInfo, SCompBlock* pBlock,
SArray* sa);
static int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle);
static void tsdbInitDataBlockLoadInfo(SDataBlockLoadInfo* pBlockLoadInfo) {
......@@ -134,7 +135,7 @@ static void tsdbInitCompBlockLoadInfo(SLoadCompBlockInfo* pCompBlockLoadInfo) {
pCompBlockLoadInfo->fileId = -1;
}
TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList) {
TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
// todo 1. filter not exist table
// todo 2. add the reference count for each table that is involved in query
......@@ -147,6 +148,7 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
pQueryHandle->cur.win = TSWINDOW_INITIALIZER;
pQueryHandle->checkFiles = true;//ASCENDING_TRAVERSE(pQueryHandle->order);
pQueryHandle->activeIndex = 0; // current active table index
pQueryHandle->qinfo = qinfo;
pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb);
......@@ -201,8 +203,8 @@ TsdbQueryHandleT* tsdbQueryTables(TsdbRepoT* tsdb, STsdbQueryCond* pCond, STable
return (TsdbQueryHandleT) pQueryHandle;
}
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList);
TsdbQueryHandleT tsdbQueryLastRow(TsdbRepoT *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_LAST;
pQueryHandle->order = TSDB_ORDER_DESC;
......@@ -227,8 +229,8 @@ SArray* tsdbGetQueriedTableIdList(TsdbQueryHandleT *pHandle) {
return res;
}
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList);
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(TsdbRepoT *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo);
pQueryHandle->type = TSDB_QUERY_TYPE_EXTERNAL;
// pQueryHandle->outputCapacity = 2; // only allowed two rows to be loaded
......@@ -303,6 +305,83 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
return true;
}
SDataRow getSDataRowInTableMem(STableCheckInfo* pCheckInfo) {
SDataRow rmem = NULL, rimem = NULL;
if (pCheckInfo->iter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node != NULL) {
rmem = SL_GET_NODE_DATA(node);
}
}
if (pCheckInfo->iiter) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
if (node != NULL) {
rimem = SL_GET_NODE_DATA(node);
}
}
if (rmem != NULL && rimem != NULL) {
if (dataRowKey(rmem) < dataRowKey(rimem)) {
pCheckInfo->chosen = 0;
return rmem;
} else if (dataRowKey(rmem) == dataRowKey(rimem)) {
// data ts are duplicated, ignore the data in mem
tSkipListIterNext(pCheckInfo->iter);
pCheckInfo->chosen = 1;
return rimem;
} else {
pCheckInfo->chosen = 1;
return rimem;
}
}
if (rmem != NULL) {
pCheckInfo->chosen = 0;
return rmem;
}
if (rimem != NULL) {
pCheckInfo->chosen = 1;
return rimem;
}
return NULL;
}
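
A simplified sketch (keys and flags only, not part of this commit) of the selection rule implemented above: take the smaller timestamp of the mem/imem iterators, and on a tie drop the mem row so the imem row wins.

#include <stdbool.h>
#include <stdint.h>

/* Returns 0 to consume the mem row, 1 to consume the imem row; at least one
 * side is assumed to have data (the caller returns NULL otherwise). */
static int chooseBufferSide(int64_t memKey, int64_t imemKey, bool hasMem, bool hasImem) {
  if (hasMem && hasImem) {
    return (memKey < imemKey) ? 0 : 1;   /* equal keys also resolve to imem */
  }
  return hasMem ? 0 : 1;
}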
bool moveToNextRow(STableCheckInfo* pCheckInfo) {
bool hasNext = false;
if (pCheckInfo->chosen == 0) {
if (pCheckInfo->iter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iter);
}
if (hasNext) {
return hasNext;
}
if (pCheckInfo->iiter != NULL) {
return tSkipListIterGet(pCheckInfo->iiter) != NULL;
}
} else {
if (pCheckInfo->chosen == 1) {
if (pCheckInfo->iiter != NULL) {
hasNext = tSkipListIterNext(pCheckInfo->iiter);
}
if (hasNext) {
return hasNext;
}
if (pCheckInfo->iter != NULL) {
return tSkipListIterGet(pCheckInfo->iter) != NULL;
}
}
}
return hasNext;
}
static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
size_t size = taosArrayGetSize(pHandle->pTableCheckInfo);
assert(pHandle->activeIndex < size && pHandle->activeIndex >= 0 && size >= 1);
......@@ -312,31 +391,13 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
STable* pTable = pCheckInfo->pTableObj;
assert(pTable != NULL);
// no data in cache, abort
if (pTable->mem == NULL && pTable->imem == NULL) {
return false;
}
if (pCheckInfo->iter == NULL && pTable->mem) {
pCheckInfo->iter = tSkipListCreateIterFromVal(pTable->mem->pData, (const char*) &pCheckInfo->lastKey,
TSDB_DATA_TYPE_TIMESTAMP, pHandle->order);
if (pCheckInfo->iter == NULL) {
return false;
}
if (!tSkipListIterNext(pCheckInfo->iter)) { // buffer is empty
return false;
}
}
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
if (node == NULL) {
initTableMemIterator(pHandle, pCheckInfo);
SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
return false;
}
SDataRow row = SL_GET_NODE_DATA(node);
pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer
tsdbTrace("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle,
pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo);
......@@ -349,7 +410,7 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) {
int32_t step = ASCENDING_TRAVERSE(pHandle->order)? 1:-1;
STimeWindow* win = &pHandle->cur.win;
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, pHandle->window.ekey,
pHandle->cur.rows = tsdbReadRowsFromCache(pCheckInfo, pHandle->window.ekey,
pHandle->outputCapacity, &win->skey, &win->ekey, pHandle); // todo refactor API
// update the last key value
......@@ -382,7 +443,7 @@ static int32_t getFileIdFromKey(TSKEY key, int32_t daysPerFile, int32_t precisio
return fid;
}
static int32_t binarySearchForBlockImpl(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
static int32_t binarySearchForBlock(SCompBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
int32_t firstSlot = 0;
int32_t lastSlot = numOfBlocks - 1;
......@@ -448,7 +509,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
TSKEY e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
// discard the unqualified data block based on the query time window
int32_t start = binarySearchForBlockImpl(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
int32_t end = start;
if (s > pCompInfo->blocks[start].keyLast) {
......@@ -522,7 +583,9 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
bool blockLoaded = false;
SArray* sa = getDefaultLoadColumns(pQueryHandle, true);
int64_t st = taosGetTimestampUs();
if (pCheckInfo->pDataCols == NULL) {
STsdbMeta* pMeta = tsdbGetMeta(pRepo);
pCheckInfo->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pRepo->config.maxRowsPerFileBlock);
......@@ -540,9 +603,15 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
blockLoaded = true;
}
SDataCols* pCols = pQueryHandle->rhelper.pDataCols[0];
assert(pCols->numOfRows != 0);
taosArrayDestroy(sa);
tfree(data);
int64_t et = taosGetTimestampUs() - st;
tsdbTrace("%p load file block into buffer, elapsed time:%"PRId64 " us", pQueryHandle, et);
return blockLoaded;
}
......@@ -600,7 +669,7 @@ static void handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SCompBlock*
// do not load file block into buffer
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order) ? 1 : -1;
cur->rows = tsdbReadRowsFromCache(pCheckInfo->iter, pCheckInfo->pTableObj, binfo.window.skey - step,
cur->rows = tsdbReadRowsFromCache(pCheckInfo, binfo.window.skey - step,
pQueryHandle->outputCapacity, &cur->win.skey, &cur->win.ekey, pQueryHandle);
pQueryHandle->realNumOfRows = cur->rows;
......@@ -649,7 +718,7 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
if (!doLoadFileDataBlock(pQueryHandle, pBlock, pCheckInfo)) {
return false;
}
SDataCols* pTSCol = pQueryHandle->rhelper.pDataCols[0];
assert(pTSCol->cols->type == TSDB_DATA_TYPE_TIMESTAMP && pTSCol->numOfRows == pBlock->numOfRows);
......@@ -1155,7 +1224,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
*numOfAllocBlocks = numOfBlocks;
int32_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
SBlockOrderSupporter sup = {0};
sup.numOfTables = numOfTables;
sup.numOfBlocksPerTable = calloc(1, sizeof(int32_t) * numOfTables);
......@@ -1192,15 +1261,26 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO
pBlockInfo->compBlock = &pBlock[k];
pBlockInfo->pTableCheckInfo = pTableCheck;
// pBlockInfo->groupIdx = pTableCheckInfo[j]->groupIdx; // set the group index
// pBlockInfo->blockIndex = pTableCheckInfo[j]->start + k; // set the block index in original table
cnt++;
}
numOfQualTables++;
}
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables", pQueryHandle, cnt, numOfQualTables);
assert(numOfBlocks == cnt);
// since there is only one table qualified, blocks are not sorted
if (numOfQualTables == 1) {
memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks);
cleanBlockOrderSupporter(&sup, numOfQualTables);
tsdbTrace("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt,
pQueryHandle->qinfo);
return TSDB_CODE_SUCCESS;
}
tsdbTrace("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt,
numOfQualTables, pQueryHandle->qinfo);
assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0
sup.numOfTables = numOfQualTables;
......@@ -1257,8 +1337,8 @@ static bool getDataBlocksInFilesImpl(STsdbQueryHandle* pQueryHandle) {
break;
}
tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d", pQueryHandle, numOfBlocks,
numOfTables, pQueryHandle->pFileGroup->fileId);
tsdbTrace("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks,
numOfTables, pQueryHandle->pFileGroup->fileId, pQueryHandle->qinfo);
assert(numOfBlocks >= 0);
if (numOfBlocks == 0) {
......@@ -1565,19 +1645,22 @@ static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle) {
pQueryHandle->window = (STimeWindow) {info.lastKey, TSKEY_INITIAL_VAL};
}
static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int maxRowsToRead, TSKEY* skey, TSKEY* ekey,
STsdbQueryHandle* pQueryHandle) {
int numOfRows = 0;
int32_t numOfCols = taosArrayGetSize(pQueryHandle->pColumns);
*skey = TSKEY_INITIAL_VAL;
int64_t st = taosGetTimestampUs();
STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pCheckInfo->pTableObj);
int32_t numOfTableCols = schemaNCols(pSchema);
do {
SSkipListNode* node = tSkipListIterGet(pIter);
if (node == NULL) {
SDataRow row = getSDataRowInTableMem(pCheckInfo);
if (row == NULL) {
break;
}
SDataRow row = SL_GET_NODE_DATA(node);
TSKEY key = dataRowKey(row);
if ((key > maxKey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
......@@ -1590,49 +1673,69 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
}
if (*skey == INT64_MIN) {
*skey = dataRowKey(row);
*skey = key;
}
*ekey = dataRowKey(row);
int32_t offset = -1;
*ekey = key;
char* pData = NULL;
STSchema* pSchema = tsdbGetTableSchema(tsdbGetMeta(pQueryHandle->pTsdb), pTable);
int32_t numOfTableCols = schemaNCols(pSchema);
for (int32_t i = 0; i < numOfCols; ++i) {
int32_t i = 0, j = 0;
while(i < numOfCols && j < numOfTableCols) {
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (pSchema->columns[j].colId < pColInfo->info.colId) {
j++;
continue;
}
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
}
for(int32_t j = 0; j < numOfTableCols; ++j) {
if (pColInfo->info.colId == pSchema->columns[j].colId) {
offset = pSchema->columns[j].offset;
break;
if (pSchema->columns[j].colId == pColInfo->info.colId) {
void* value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + pSchema->columns[j].offset);
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
memcpy(pData, value, varDataTLen(value));
} else {
memcpy(pData, value, pColInfo->info.bytes);
}
j++;
i++;
} else { // pColInfo->info.colId < pSchema->columns[j].colId, it is a NULL data
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(pData, pColInfo->info.type);
} else {
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
i++;
}
assert(offset != -1); // todo handle error
void *value = tdGetRowDataOfCol(row, pColInfo->info.type, TD_DATA_ROW_HEAD_SIZE + offset);
}
while (i < numOfCols) { // the remain columns are all null data
SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
pData = pColInfo->pData + numOfRows * pColInfo->info.bytes;
} else {
pData = pColInfo->pData + (maxRowsToRead - numOfRows - 1) * pColInfo->info.bytes;
}
if (pColInfo->info.type == TSDB_DATA_TYPE_BINARY || pColInfo->info.type == TSDB_DATA_TYPE_NCHAR) {
memcpy(pData, value, varDataTLen(value));
setVardataNull(pData, pColInfo->info.type);
} else {
memcpy(pData, value, pColInfo->info.bytes);
setNull(pData, pColInfo->info.type, pColInfo->info.bytes);
}
i++;
}
if (++numOfRows >= maxRowsToRead) {
tSkipListIterNext(pIter);
moveToNextRow(pCheckInfo);
break;
}
} while(tSkipListIterNext(pIter));
} while(moveToNextRow(pCheckInfo));
assert(numOfRows <= maxRowsToRead);
......@@ -1646,6 +1749,10 @@ static int tsdbReadRowsFromCache(SSkipListIterator* pIter, STable* pTable, TSKEY
}
}
int64_t elapsedTime = taosGetTimestampUs() - st;
tsdbTrace("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d", pQueryHandle,
elapsedTime, numOfRows, numOfCols);
return numOfRows;
}
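
A hypothetical, self-contained illustration (simplified descriptors, printf instead of memcpy) of the one-pass merge by column id used in tsdbReadRowsFromCache above: matching ids copy the value, query columns missing from the table schema are set to NULL, and schema-only columns are skipped.

#include <stdint.h>
#include <stdio.h>

typedef struct { int32_t colId; } ExampleCol;   /* hypothetical simplified column descriptor */

/* Both lists are ordered by column id, so a single pass is enough. */
static void mergeColumnsById(const ExampleCol *qCols, int numOfQueryCols,
                             const ExampleCol *sCols, int numOfSchemaCols) {
  int i = 0, j = 0;
  while (i < numOfQueryCols && j < numOfSchemaCols) {
    if (sCols[j].colId < qCols[i].colId) {
      j++;                                                   /* column not requested: skip it */
    } else if (sCols[j].colId == qCols[i].colId) {
      printf("col %d: copy value from row\n", qCols[i].colId);
      i++; j++;
    } else {
      printf("col %d: set NULL (missing in schema)\n", qCols[i].colId);
      i++;
    }
  }
  for (; i < numOfQueryCols; ++i) {                          /* remaining query columns are all NULL */
    printf("col %d: set NULL (missing in schema)\n", qCols[i].colId);
  }
}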
......
......@@ -153,6 +153,8 @@ bool taosMbsToUcs4(char *mbs, size_t mbs_len, char *ucs4, int32_t ucs4_max_len,
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes);
void taosRandStr(char* str, int32_t size);
int32_t taosUcs4ToMbs(void *ucs4, int32_t ucs4_max_len, char *mbs);
bool taosValidateEncodec(const char *encodec);
......
......@@ -92,12 +92,14 @@ void* taosArrayGet(const SArray* pArray, size_t index) {
}
void* taosArrayGetP(const SArray* pArray, size_t index) {
void* ret = taosArrayGet(pArray, index);
if (ret == NULL) {
assert(index < pArray->size);
void* d = TARRAY_GET_ELEM(pArray, index);
if (d == NULL) {
return NULL;
}
return *(void**)ret;
return *(void**)d;
}
size_t taosArrayGetSize(const SArray* pArray) { return pArray->size; }
......
......@@ -19,6 +19,7 @@
#include "tutil.h"
int taosGetFqdn(char *fqdn) {
int code = 0;
char hostname[1024];
hostname[1023] = '\0';
gethostname(hostname, 1023);
......@@ -27,13 +28,15 @@ int taosGetFqdn(char *fqdn) {
h = gethostbyname(hostname);
if (h != NULL) {
strcpy(fqdn, h->h_name);
return 0;
} else {
uError("failed to get host name");
return -1;
uError("failed to get host name(%s)", strerror(errno));
code = -1;
}
free(h);
// to do: free the resources
// free(h);
return code;
}
uint32_t taosGetIpFromFqdn(const char *fqdn) {
......@@ -47,7 +50,7 @@ uint32_t ip2uint(const char *const ip_addr) {
char ip_addr_cpy[20];
char ip[5];
strcpy(ip_addr_cpy, ip_addr);
tstrncpy(ip_addr_cpy, ip_addr, sizeof(ip_addr_cpy));
char *s_start, *s_end;
s_start = ip_addr_cpy;
......@@ -206,7 +209,7 @@ int taosOpenUdpSocket(uint32_t ip, uint16_t port) {
int reuse, nocheck;
int bufSize = 8192000;
uTrace("open udp socket:%s:%hu", ip, port);
uTrace("open udp socket:0x%x:%hu", ip, port);
memset((char *)&localAddr, 0, sizeof(localAddr));
localAddr.sin_family = AF_INET;
......@@ -257,7 +260,7 @@ int taosOpenUdpSocket(uint32_t ip, uint16_t port) {
/* bind socket to local address */
if (bind(sockFd, (struct sockaddr *)&localAddr, sizeof(localAddr)) < 0) {
uError("failed to bind udp socket: %d (%s), %s:%hu", errno, strerror(errno), ip, port);
uError("failed to bind udp socket: %d (%s), 0x%x:%hu", errno, strerror(errno), ip, port);
taosCloseSocket(sockFd);
return -1;
}
......@@ -363,7 +366,7 @@ int taosOpenTcpServerSocket(uint32_t ip, uint16_t port) {
int sockFd;
int reuse;
uTrace("open tcp server socket:%s:%hu", ip, port);
uTrace("open tcp server socket:0x%x:%hu", ip, port);
bzero((char *)&serverAdd, sizeof(serverAdd));
serverAdd.sin_family = AF_INET;
......
......@@ -27,8 +27,6 @@
#include "tulog.h"
#include "taoserror.h"
int32_t tmpFileSerialNum = 0;
int32_t strdequote(char *z) {
if (z == NULL) {
return 0;
......@@ -433,12 +431,24 @@ void getTmpfilePath(const char *fileNamePrefix, char *dstPath) {
#else
char *tmpDir = "/tmp/";
#endif
int64_t ts = taosGetTimestampUs();
strcpy(tmpPath, tmpDir);
strcat(tmpPath, tdengineTmpFileNamePrefix);
strcat(tmpPath, fileNamePrefix);
strcat(tmpPath, "-%d-%"PRIu64"-%u-%"PRIu64);
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), taosGetPthreadId(), atomic_add_fetch_32(&tmpFileSerialNum, 1), ts);
strcat(tmpPath, "-%d-%s");
char rand[8] = {0};
taosRandStr(rand, tListLen(rand) - 1);
snprintf(dstPath, PATH_MAX, tmpPath, getpid(), rand);
}
void taosRandStr(char* str, int32_t size) {
const char* set = "abcdefghijklmnopqrstuvwxyz0123456789-_.";
int32_t len = 39;
for(int32_t i = 0; i < size; ++i) {
str[i] = set[rand()%len];
}
}
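
A hedged usage sketch (not in this commit): taosRandStr writes raw characters with no terminator, so the destination is zero-initialized and one byte is reserved for the NUL, mirroring the call in getTmpfilePath.

#include <stdint.h>
#include <string.h>

static void makeRandomSuffix(char *buf, int32_t bufSize) {
  memset(buf, 0, (size_t)bufSize);            /* keep the trailing NUL */
  taosRandStr(buf, bufSize - 1);              /* fill all but the last byte */
}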
int tasoUcs4Compare(void* f1_ucs4, void *f2_ucs4, int bytes) {
......
......@@ -68,11 +68,19 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
pWal->num = 0;
pWal->level = pCfg->walLevel;
pWal->keep = pCfg->keep;
strcpy(pWal->path, path);
tstrncpy(pWal->path, path, sizeof(pWal->path));
pthread_mutex_init(&pWal->mutex, NULL);
if (access(path, F_OK) != 0) mkdir(path, 0755);
if (access(path, F_OK) != 0) {
if (mkdir(path, 0755) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("wal:%s, failed to create directory(%s)", path, strerror(errno));
pthread_mutex_destroy(&pWal->mutex);
free(pWal);
pWal = NULL;
}
}
if (pCfg->keep == 1) return pWal;
if (walHandleExistingFiles(path) == 0)
......@@ -80,7 +88,7 @@ void *walOpen(const char *path, const SWalCfg *pCfg) {
if (pWal->fd <0) {
terrno = TAOS_SYSTEM_ERROR(errno);
wError("wal:%s, failed to open", path);
wError("wal:%s, failed to open(%s)", path, strerror(errno));
pthread_mutex_destroy(&pWal->mutex);
free(pWal);
pWal = NULL;
......@@ -119,7 +127,8 @@ void walClose(void *handle) {
int walRenew(void *handle) {
if (handle == NULL) return 0;
SWal *pWal = handle;
int code = 0;
terrno = 0;
pthread_mutex_lock(&pWal->mutex);
......@@ -135,8 +144,8 @@ int walRenew(void *handle) {
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
wError("wal:%d, failed to open(%s)", pWal->name, strerror(errno));
code = -1;
wError("wal:%s, failed to open(%s)", pWal->name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
} else {
wTrace("wal:%s, it is created", pWal->name);
......@@ -156,14 +165,15 @@ int walRenew(void *handle) {
pthread_mutex_unlock(&pWal->mutex);
return code;
return terrno;
}
int walWrite(void *handle, SWalHead *pHead) {
SWal *pWal = handle;
int code = 0;
if (pWal == NULL) return -1;
terrno = 0;
// no wal
if (pWal->level == TAOS_WAL_NOLOG) return 0;
if (pHead->version <= pWal->version) return 0;
......@@ -174,12 +184,12 @@ int walWrite(void *handle, SWalHead *pHead) {
if(write(pWal->fd, pHead, contLen) != contLen) {
wError("wal:%s, failed to write(%s)", pWal->name, strerror(errno));
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
} else {
pWal->version = pHead->version;
}
return code;
return terrno;
}
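
A hedged caller sketch of the revised error convention shown above: walWrite (and walRenew) now return terrno, so zero means success and any non-zero value is the TAOS_SYSTEM_ERROR code describing the failure.

/* pWal and pHead are assumed to be prepared by the vnode write path. */
static int writeWalOrReport(void *pWal, SWalHead *pHead) {
  int code = walWrite(pWal, pHead);
  if (code != 0) {
    /* code == terrno here; the caller decides whether to retry or fail the write */
  }
  return code;
}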
void walFsync(void *handle) {
......@@ -196,11 +206,11 @@ void walFsync(void *handle) {
int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int)) {
SWal *pWal = handle;
int code = 0;
struct dirent *ent;
int count = 0;
uint32_t maxId = 0, minId = -1, index =0;
terrno = 0;
int plen = strlen(walPrefix);
char opath[TSDB_FILENAME_LEN+5];
......@@ -224,30 +234,30 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
closedir(dir);
if (count == 0) {
if (pWal->keep) code = walRenew(pWal);
return code;
if (pWal->keep) terrno = walRenew(pWal);
return terrno;
}
if ( count != (maxId-minId+1) ) {
wError("wal:%s, messed up, count:%d max:%d min:%d", opath, count, maxId, minId);
code = -1;
terrno = TAOS_SYSTEM_ERROR(TSDB_CODE_APP_ERROR);
} else {
wTrace("wal:%s, %d files will be restored", opath, count);
for (index = minId; index<=maxId; ++index) {
sprintf(pWal->name, "%s/%s%d", opath, walPrefix, index);
code = walRestoreWalFile(pWal, pVnode, writeFp);
if (code < 0) break;
terrno = walRestoreWalFile(pWal, pVnode, writeFp);
if (terrno < 0) break;
}
}
if (code == 0) {
if (terrno == 0) {
if (pWal->keep == 0) {
code = walRemoveWalFiles(opath);
if (code == 0) {
terrno = walRemoveWalFiles(opath);
if (terrno == 0) {
if (remove(opath) < 0) {
wError("wal:%s, failed to remove directory(%s)", opath, strerror(errno));
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
}
}
} else {
......@@ -258,12 +268,12 @@ int walRestore(void *handle, void *pVnode, int (*writeFp)(void *, void *, int))
pWal->fd = open(pWal->name, O_WRONLY | O_CREAT | O_APPEND, S_IRWXU | S_IRWXG | S_IRWXO);
if (pWal->fd < 0) {
wError("wal:%s, failed to open file(%s)", pWal->name, strerror(errno));
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
}
}
}
return code;
return terrno;
}
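walRestore() walks the numbered wal files from minId to maxId and stops at the first failing file, propagating the error it recorded. The loop in isolation, with restoreOne() standing in for walRestoreWalFile() and the file-name format reduced to a hard-coded "wal" prefix:

#include <stdint.h>
#include <stdio.h>

static int restoreOne(const char *name) { (void)name; return 0; }  /* placeholder for walRestoreWalFile() */

/* Replay wal<minId> .. wal<maxId> in order; propagate the first error. */
static int restoreRange(const char *dir, uint32_t minId, uint32_t maxId) {
  char name[512];
  for (uint32_t idx = minId; idx <= maxId; ++idx) {
    snprintf(name, sizeof(name), "%s/wal%u", dir, idx);
    int rc = restoreOne(name);
    if (rc != 0) return rc;
  }
  return 0;
}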
int walGetWalFile(void *handle, char *name, uint32_t *index) {
......@@ -292,40 +302,47 @@ int walGetWalFile(void *handle, char *name, uint32_t *index) {
}
static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
int code = 0;
char *name = pWal->name;
terrno = 0;
char *buffer = malloc(1024000); // size for one record
if (buffer == NULL) return -1;
if (buffer == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
return terrno;
}
SWalHead *pHead = (SWalHead *)buffer;
int fd = open(name, O_RDONLY);
if (fd < 0) {
wError("wal:%s, failed to open for restore(%s)", name, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
free(buffer);
return -1;
return terrno;
}
wTrace("wal:%s, start to restore", name);
while (1) {
int ret = read(fd, pHead, sizeof(SWalHead));
if ( ret == 0) { code = 0; break;}
if ( ret == 0) break;
if (ret != sizeof(SWalHead)) {
wWarn("wal:%s, failed to read head, skip, ret:%d(%s)", name, ret, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SWalHead))) {
wWarn("wal:%s, cksum is messed up, skip the rest of file", name);
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
ret = read(fd, pHead->cont, pHead->len);
if ( ret != pHead->len) {
wWarn("wal:%s, failed to read body, skip, len:%d ret:%d", name, pHead->len, ret);
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
......@@ -336,11 +353,10 @@ static int walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp) {
close(fd);
free(buffer);
return code;
return terrno;
}
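walRestoreWalFile() reads a fixed-size header and then a variable-length body per record until end of file or a short read. A self-contained sketch of that read loop under a hypothetical DemoHead layout, with the checksum check and replay callback left as comments:

#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct { uint32_t len; uint64_t version; } DemoHead;  /* illustrative header layout */

static int demoRestoreFile(const char *name) {
  enum { MAX_BODY = 1024000 };                       /* one record at a time */
  int fd = open(name, O_RDONLY);
  if (fd < 0) return -1;
  char *body = malloc(MAX_BODY);
  if (body == NULL) { close(fd); return -1; }
  for (;;) {
    DemoHead head;
    ssize_t n = read(fd, &head, sizeof(head));
    if (n == 0) break;                               /* clean end of file */
    if (n != (ssize_t)sizeof(head)) break;           /* truncated header: stop */
    if (head.len > MAX_BODY) break;                  /* implausible length: stop */
    if (read(fd, body, head.len) != (ssize_t)head.len) break;  /* truncated body */
    /* checksum check and replay of (head, body) would go here */
  }
  free(body);
  close(fd);
  return 0;
}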
int walHandleExistingFiles(const char *path) {
int code = 0;
char oname[TSDB_FILENAME_LEN * 3];
char nname[TSDB_FILENAME_LEN * 3];
char opath[TSDB_FILENAME_LEN];
......@@ -350,6 +366,7 @@ int walHandleExistingFiles(const char *path) {
struct dirent *ent;
DIR *dir = opendir(path);
int plen = strlen(walPrefix);
terrno = 0;
if (access(opath, F_OK) == 0) {
// old directory is there, it means restore process is not finished
......@@ -360,13 +377,19 @@ int walHandleExistingFiles(const char *path) {
int count = 0;
while ((ent = readdir(dir))!= NULL) {
if ( strncmp(ent->d_name, walPrefix, plen) == 0) {
if (access(opath, F_OK) != 0) mkdir(opath, 0755);
sprintf(oname, "%s/%s", path, ent->d_name);
sprintf(nname, "%s/old/%s", path, ent->d_name);
if (access(opath, F_OK) != 0) {
if (mkdir(opath, 0755) != 0) {
wError("wal:%s, failed to create directory:%s(%s)", oname, opath, strerror(errno));
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
}
if (rename(oname, nname) < 0) {
wError("wal:%s, failed to move to new:%s", oname, nname);
code = -1;
terrno = TAOS_SYSTEM_ERROR(errno);
break;
}
......@@ -378,14 +401,14 @@ int walHandleExistingFiles(const char *path) {
}
closedir(dir);
return code;
return terrno;
}
static int walRemoveWalFiles(const char *path) {
int plen = strlen(walPrefix);
char name[TSDB_FILENAME_LEN * 3];
int code = 0;
terrno = 0;
if (access(path, F_OK) != 0) return 0;
struct dirent *ent;
......@@ -396,13 +419,13 @@ static int walRemoveWalFiles(const char *path) {
sprintf(name, "%s/%s", path, ent->d_name);
if (remove(name) <0) {
wError("wal:%s, failed to remove(%s)", name, strerror(errno));
code = -1; break;
terrno = TAOS_SYSTEM_ERROR(errno);
}
}
}
closedir(dir);
return code;
return terrno;
}
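walRemoveWalFiles() scans the directory, matches entries against the wal prefix, and removes them, remembering the system error instead of aborting the scan. The same pattern in isolation, with removeByPrefix() as an illustrative helper name:

#include <dirent.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Remove every entry under path whose name starts with prefix; keep
 * scanning on failure and report the last errno seen (0 if all removed). */
static int removeByPrefix(const char *path, const char *prefix) {
  DIR *dir = opendir(path);
  if (dir == NULL) return -1;
  size_t plen = strlen(prefix);
  struct dirent *ent;
  int lastErr = 0;
  while ((ent = readdir(dir)) != NULL) {
    if (strncmp(ent->d_name, prefix, plen) != 0) continue;
    char name[1024];
    snprintf(name, sizeof(name), "%s/%s", path, ent->d_name);
    if (remove(name) < 0) {
      fprintf(stderr, "failed to remove %s: %s\n", name, strerror(errno));
      lastErr = errno;
    }
  }
  closedir(dir);
  return lastErr;
}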
#!/bin/bash
# insert
python3 ./test.py -g -f insert/basic.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/int.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/float.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/bigint.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/bool.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/double.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/smallint.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/tinyint.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/binary.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/date.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/nchar.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f insert/multi.py
python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/basic.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/int.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/float.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/bigint.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/bool.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/double.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/smallint.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/tinyint.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/binary.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/date.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/nchar.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f insert/multi.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
# table
python3 ./test.py -g -f table/column_name.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/column_num.py
python3 ./test.py -g -s && sleep 1
python3 ./test.py -g -f table/db_table.py
python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f table/column_name.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f table/column_num.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f table/db_table.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
# import
python3 ./test.py -g -f import_merge/importDataLastSub.py
python3 ./test.py -g -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py -g -f import_merge/importDataLastSub.py
PYTHONMALLOC=malloc python3 ./test.py -g -s && sleep 1
#tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -s && sleep 1
PYTHONMALLOC=malloc python3 ./test.py $1 -f tag_lite/filter.py
PYTHONMALLOC=malloc python3 ./test.py $1 -s && sleep 1
......@@ -2,11 +2,11 @@
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode)
# step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841)
# step 4: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
# step 4: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc
# step 5: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-16d, now+16d
# step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting sync end
# expect: in dnode2, the files 1840 and 1842 will be removed
# expect: in dnode2, the files 1837 and 1839 will be removed
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
......@@ -14,10 +14,10 @@ system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode2 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode3 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode4 -c numOfMPeers -v 1
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
......@@ -39,11 +39,6 @@ system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
......@@ -64,16 +59,16 @@ sql connect
print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
#system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2
#sql create dnode $hostname3
sql create dnode $hostname3
sleep 3000
$totalTableNum = 1
$sleepTimer = 3000
$db = db
sql create database $db replica 1 cache 1
sql create database $db replica 2 cache 1
sql use $db
# create table , insert data
......@@ -82,7 +77,7 @@ sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000
$tblNum = $totalTableNum
$totalRows = 0
#$tsStart = 1420041600000
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
# insert over 2M data in order to falling disc, generate one file
$i = 0
......@@ -102,19 +97,24 @@ while $i < $tblNum
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
return -1
print rows:$rows data00:$data00
if $rows != 1 then
return -1
endi
if $data00 == 0 then
return -1
endi
$totalRows = $data00
print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc
sql insert into $tb values ( now - 15d , -15 )
sql insert into $tb values ( now + 15d , 15 )
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode2, so date rows falling disc, generate two new files in dnode2
system sh/exec.sh -n dnode2 -s stop
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline:
......@@ -153,48 +153,14 @@ if $data00 != $totalRows then
endi
print ============== step5: insert two data rows: now-16d, now+16d,
sql insert into $tb values ( now - 16d , -16 )
sql insert into $tb values ( now + 16d , 16 )
sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2
return 1
print ============== step5: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step6: in dnode2, the files 1840 and 1842 will be removed
# how check in script ???
\ No newline at end of file
print ============== step6: please check there should be 3 files in sim/dnode2/data/vnode/vnode2/tsdb/data/, and 1 file in sim/dnode3/data/vnode/vnode2/tsdb/data/
# Test case description: dnode1 is mnode only, dnode2/dnode3 are vnode only
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode)
# step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841)
# step 4: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc
# step 5: stop dnode2, so date rows falling disc, generate two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting sync end
# expect: in dnode2, the files 1837 and 1839 will be removed
sql connect
sleep 3000
print ============== step7: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep 1000
# check using select
$db = db
$stb = stb
sql use $db
sql select count(*) from $stb
print data00 $data00, should equal the dn2_mn1_cache_file_sync.sim output
#if $data00 != $totalRows then
# return -1
#endi
print ============== step8: please check there should be 1 file in sim/dnode2/data/vnode/vnode2/tsdb/data/, and 1 file in sim/dnode3/data/vnode/vnode2/tsdb/data/
......@@ -23,28 +23,39 @@ system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode5 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 200
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode5 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 200
#system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 200
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode5 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode1 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode2 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode4 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode5 -c offlineThreshold -v 20
system sh/cfg.sh -n dnode1 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode2 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode4 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode5 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode1 -c enableCoreFile -v 1
system sh/cfg.sh -n dnode2 -c enableCoreFile -v 1
......@@ -63,9 +74,9 @@ sql connect
print ============== step2: start dnode2/dnode3 and add into cluster, then create database, create table , and insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 1000
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
$rowNum = 100
$tblNum = 16
......@@ -151,7 +162,7 @@ endi
# return -1
#endi
sleep 30000
sleep 15000
wait_drop:
sql show dnodes
......@@ -174,10 +185,69 @@ endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
print ============== step4-1: restart dnode3, and add it into the cluster
system rm -rf ../../sim/dnode3
sleep 3000
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c offlineThreshold -v 10
system sh/cfg.sh -n dnode3 -c enableCoreFile -v 1
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
sleep 3000
wait_dnode3_ready:
sql show dnodes
print rows: $rows
if $rows != 4 then
sleep 3000
goto wait_dnode3_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
$dnode5Status = $data4_5
if $dnode1Status != ready then
return -1
endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
if $dnode5Status != ready then
return -1
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: start dnode5 and add into cluster , drop database
sql drop database $db
......@@ -187,7 +257,7 @@ sql create dnode $hostname5
sleep 3000
wait_dnode5:
sql show dnodes
if $rows != 4 then
if $rows != 5 then
sleep 3000
goto wait_dnode5
endi
......@@ -196,10 +266,12 @@ print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode4Status = $data4_4
$dnode5Status = $data4_5
$dnode5Status = $data4_5
$dnode6Status = $data4_6
if $dnode1Status != ready then
return -1
......@@ -207,17 +279,22 @@ endi
if $dnode2Status != ready then
return -1
endi
if $dnode3Status != null then
return -1
endi
if $dnode4Status != ready then
return -1
endi
if $dnode5Status != ready then
return -1
endi
if $dnode6Status != ready then
return -1
endi
print ============== step6: create database and table until not free vnodes
$rowNum = 100
$tblNum = 24
$tblNum = 32
$totalRows = 0
$tsStart = 1420041600000
......@@ -259,8 +336,9 @@ if $data00 != $totalRows then
return -1
endi
print ============== step7: drop dnode3, and system should prompt cannot drop dnodes
sql_error drop dnode $hostname3
print ============== step8: add one new table, and system should prompt 'need more dnode'
print ============== step7: drop dnode $hostname5, system should prompt "DB error: no enough dnodes"
sql_error drop dnode $hostname5
print error: $error
print ============== step8: create table tb_more using $stb tags( 1000 ), system should prompt 'DB error: no enough dnodes'
sql_error create table tb_more using $stb tags( 1000 )
print error: $error
......@@ -89,11 +89,8 @@ if $data00 != $totalRows then
return -1
endi
print ============== step2-1: start dnode2 for falling disc, then restart dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop
print ============== step2-1: stop dnode2 for falling disc, then restart dnode2, and check rows
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline_0:
sql show dnodes
......@@ -151,10 +148,6 @@ if $data00 != $totalRows then
endi
print ============== step3: start dnode3 and add into cluster , then alter replica from 1 to 2, and waiting sync
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname3
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3/dnode4 and add into cluster , then create database with replica 3, and create table, insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname2
sql create dnode $hostname3
sql create dnode $hostname4
sleep 3000
$totalTableNum = 100
$sleepTimer = 3000
$db = db
sql create database $db replica 3 maxTables $totalTableNum
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 100
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1420041600000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
sleep 1000
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step3: stop dnode2, and remove its vnodeX subdirectory
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline_0:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode2_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline_0
endi
system rm -rf ../../../sim/dnode2/data/vnode/*
sleep 1000
print ============== step4: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep $sleepTimer
wait_dnode2_reready:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode2_reready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_reready
endi
print ============== step5: stop dnode3/dnode4, and check rows
system sh/exec.sh -n dnode3 -s stop
system sh/exec.sh -n dnode4 -s stop
sleep $sleepTimer
wait_dnode34_offline:
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode34_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode34_offline
endi
if $dnode3Status != offline then
sleep 2000
goto wait_dnode34_offline
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode34_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
sql insert into $tb values ( now , 20000 ) ( now + 1a, 20001 ) ( now + 2a, 20002 )
$totalRows = $totalRows + 3
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
# Test case description: dnode1 is mnode only, dnode2/dnode3 are vnode only
# step 1: start dnode1
# step 2: start dnode2 and dnode3, and all added into cluster (Suppose dnode2 is master-vnode)
# step 3: create db, table, insert data, and Falling disc into file (control only one file, e.g. 1841)
# step 4: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc
# step 5: stop dnode2, so data rows falling disc, generate two new files 1840, 1842 in dnode2
# step 6: insert two data rows: now-21d, now-41d
# step 7: restart dnode2, waiting sync end
# expect: in dnode2, the files 1837 and 1839 will be removed
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 2
system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1, only deploy mnode
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
$totalTableNum = 1
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 130000
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
# insert over 2M data in order to falling disc, generate one file
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
# $ts = $tsStart + $x
sql insert into $tb values ( now + 0s , $x ) ( now + 1s , $x ) ( now + 2s , $x ) ( now + 3s , $x ) ( now + 4s , $x ) ( now + 5s , $x ) ( now + 6s , $x ) ( now + 7s , $x ) ( now + 8s , $x ) ( now + 9s , $x )
$x = $x + 10
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00
if $rows != 1 then
return -1
endi
if $data00 == 0 then
return -1
endi
$totalRows = $data00
print ============== step3: insert old data(now-20d) and new data(now-40d), control data rows in order to save in cache, not falling disc
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode2, so data rows falling disc, generate two new files in dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sleep $sleepTimer
wait_dnode2_offline:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode3Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
sleep $sleepTimer # waiting for the master vnode of dnode2 to move to dnode3
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: insert two data rows: now-21d, now-41d
sql insert into $tb values ( now - 21d , -21 )
sql insert into $tb values ( now - 41d , -41 )
$totalRows = $totalRows + 2
print ============== step5: restart dnode2, waiting sync end
system sh/exec.sh -n dnode2 -s start
sleep 3000
wait_dnode2_ready:
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
sleep $sleepTimer
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
......@@ -2,18 +2,42 @@ system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 8
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
#system sh/cfg.sh -n dnode4 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
......@@ -22,21 +46,23 @@ system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: replica is 1, and start 1 dnode
print ============== step1: replica is 1, and start 1 dnode, then create tables and insert data
system sh/exec.sh -n dnode1 -s start
sleep 3000
sql connect
$db = replica_db1
sql create database $db replica 1 maxTables 4
$totalTableNum = 12
$db = db
sql create database $db replica 1 maxTables $totalTableNum
sql use $db
# create table , insert data
$stb = repl_stb
$stb = stb
sql create table $stb (ts timestamp, c1 int) tags(t1 int)
$rowNum = 10
$tblNum = 12
$tblNum = $totalTableNum
$totalRows = $rowNum * $tblNum
$ts0 = 1420041600000
$ts = $ts0
......@@ -55,46 +81,136 @@ while $i < $tblNum
$x = $x + 1
endw
$i = $i + 1
print $tb inserted rows: $x
endw
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step2: add 1 new dnode, expect balanced
system sh/exec.sh -n dnode2 -s start
sql create dnode $hostname2
sleep 3000
# expect after balance, 2 vnodes in dnode1, 1 vnode in dnode2
$x = 0
show2:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
wait_dnode2_ready:
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
if $data2_1 != 2 then
goto show2
if $rows != 2 then
sleep 2000
goto wait_dnode2_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
if $data2_2 != 1 then
goto show2
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready
endi
print ============== step4: stop dnode1, and wait dnode2 master
print ============== step3: stop dnode1/dnode2, modify cfg numOfMnodes to 2, and restart dnode1/dnode2
system sh/exec.sh -n dnode1 -s stop
system sh/exec.sh -n dnode2 -s stop
sleep 3000
$x = 0
loop_wait:
$x = $x + 1
sleep 2000
if $x == 10 then
print ERROR: after dnode1 stop, dnode2 didn't become a master!
return -1
endi
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 1
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 5000
print ============= step4: wait dnode ready
wait_dnode_ready:
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $dnode1Status != ready then
sleep 2000
goto wait_dnode_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode_ready
endi
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
print ============== step5: stop dnode1
system sh/exec.sh -n dnode1 -s stop
sleep 3000
wait_dnode2_master:
sql show mnodes
$dnodeRole = $data2_1
print dnodeRole ==> $dnodeRole
if $rows != 2 then
sleep 2000
goto wait_dnode2_master
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$dnode1Status = $data4_1
$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#$dnode5Status = $data4_5
if $data2_1 != offline then
sleep 2000
goto wait_dnode2_master
endi
if $data2_2 != master then
sleep 2000
goto wait_dnode2_master
endi
if $dnodeRole != master then
goto loop_wait
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi