Commit 21c77738 authored by: P Ping Xiao

Merge branch 'develop' into jdbcfixes

......@@ -24,3 +24,11 @@ ENDIF ()
IF (TD_MEM_CHECK)
ADD_DEFINITIONS(-DTAOS_MEM_CHECK)
ENDIF ()
IF (TD_RANDOM_FILE_FAIL)
ADD_DEFINITIONS(-DTAOS_RANDOM_FILE_FAIL)
ENDIF ()
IF (TD_RANDOM_NETWORK_FAIL)
ADD_DEFINITIONS(-DTAOS_RANDOM_NETWORK_FAIL)
ENDIF ()
......@@ -30,4 +30,14 @@ ENDIF ()
IF (${MEM_CHECK} MATCHES "true")
SET(TD_MEM_CHECK TRUE)
MESSAGE(STATUS "build with memory check")
ENDIF ()
\ No newline at end of file
ENDIF ()
IF (${RANDOM_FILE_FAIL} MATCHES "true")
SET(TD_RANDOM_FILE_FAIL TRUE)
MESSAGE(STATUS "build with random-file-fail enabled")
ENDIF ()
IF (${RANDOM_NETWORK_FAIL} MATCHES "true")
SET(TD_RANDOM_NETWORK_FAIL TRUE)
MESSAGE(STATUS "build with random-network-fail enabled")
ENDIF ()
......@@ -480,9 +480,9 @@ TDengine supports aggregation queries over data. The supported aggregation and selection functions
- **LEASTSQUARES**
```mysql
SELECT LEASTSQUARES(field_name) FROM tb_name [WHERE clause]
SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
```
功能说明 (Function): returns the fitted straight-line equation of a column's values against the primary key (timestamp).
功能说明 (Function): returns the fitted straight-line equation of a column's values against the primary key (timestamp). start_val is the initial value of the independent variable, and step_val is its step size.
返回结果数据类型 (Return Data Type): a string expression "(slope, intercept)".
应用字段 (Applicable Fields): cannot be applied to fields of type timestamp, binary, nchar, or bool.
说明 (Note): the independent variable is the timestamp, and the dependent variable is the value of the column.
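A minimal illustrative query following the syntax above; the table `d1001` and column `current` are hypothetical, and the independent variable starts at 1 and advances by 1 per row, so the result is returned as the string "(slope, intercept)":
```mysql
SELECT LEASTSQUARES(current, 1, 1) FROM d1001;
```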
......
......@@ -412,7 +412,7 @@ TDengine supports aggregations over numerical values, they are listed below:
SELECT PERCENTILE(field_name, P) FROM { tb_name | stb_name } [WHERE clause]
```
Function: the value of the specified column below which `P` percent of the data points fall.
Return Data Type: the same data type.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: The range of `P` is `[0, 100]`. When `P=0`, `PERCENTILE` returns the same value as `MIN`; when `P=100`, `PERCENTILE` returns the same value as `MAX`.
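An illustrative query, assuming a hypothetical table `d1001` with a numeric column `current`; it returns the median (50th percentile) of that column as a double, per the updated return type above:
```mysql
SELECT PERCENTILE(current, 50) FROM d1001;
```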
......@@ -446,7 +446,7 @@ TDengine supports aggregations over numerical values, they are listed below:
SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]
```
Function: returns the difference between the maximum and the minimum value.
Return Data Type: the same data type.
Return Data Type: double.
Applicable Data Types: all types except `timestamp`, `binary`, `nchar`, `bool`.
Applied to: table/STable.
Note: `SPREAD` gives the range of data variation in a table/STable; it is equivalent to `MAX()` - `MIN()`.
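An illustrative query, assuming a hypothetical super table `meters` with a numeric column `current`; it is equivalent to computing `MAX(current) - MIN(current)` over the table and returns a double:
```mysql
SELECT SPREAD(current) FROM meters;
```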
......
......@@ -31,9 +31,7 @@ extern int32_t tscEmbedded;
#define tscInfo(...) { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC INFO ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }}
#define tscDebug(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#define tscTrace(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC TRACE ", cDebugFlag, __VA_ARGS__); }}
#define tscDebugDump(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#define tscTraceDump(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLongString("TSC TRACE ", cDebugFlag, __VA_ARGS__); }}
#define tscDebugL(...){ if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC DEBUG ", cDebugFlag, __VA_ARGS__); }}
#ifdef __cplusplus
}
......
......@@ -29,9 +29,6 @@
#define jniDebug(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLog("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
#define jniTrace(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLog("JNI TRACE ", jniDebugFlag, __VA_ARGS__); }}
#define jniDebugDump(...) { if (jniDebugFlag & DEBUG_DEBUG) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
#define jniTraceDump(...) { if (jniDebugFlag & DEBUG_TRACE) { taosPrintLongString("JNI DEBUG ", jniDebugFlag, __VA_ARGS__); }}
int __init = 0;
JavaVM *g_vm = NULL;
......
......@@ -55,7 +55,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
strtolower(pSql->sqlstr, sqlstr);
tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
pSql->cmd.curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
......@@ -471,7 +471,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
// in case of insert, redo parsing the sql string and build new submit data block for two reasons:
// 1. the table Id(tid & uid) may have been update, the submit block needs to be updated
// 1. the table Id (tid & uid) may have been updated, so the submit block needs to be updated accordingly.
// 2. vnode may need the schema information along with submit block to update its local table schema.
if (pCmd->command == TSDB_SQL_INSERT) {
tscDebug("%p redo parse sql string to build submit block", pSql);
......
......@@ -406,7 +406,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
pSql->res.qhandle = 0x1;
pSql->res.numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscCacheHandle,false);
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
tscProcessServerVer(pSql);
} else if (pCmd->command == TSDB_SQL_CLI_VERSION) {
......
......@@ -364,7 +364,7 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
TSKEY stime = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
......@@ -831,7 +831,7 @@ void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
......@@ -1301,9 +1301,7 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
// taosResetFillInfo(pLocalReducer->pFillInfo, pQueryInfo->order.order, newTime,
// pQueryInfo->groupbyExpr.numOfGroupCols, 4096, 0, NULL, pLocalReducer->rowSize);
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}
......
......@@ -538,7 +538,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pRes->numOfRows = 1;
strtolower(pSql->sqlstr, sql);
tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
if (tscIsInsertData(pSql->sqlstr)) {
pStmt->isInsert = true;
......
......@@ -4487,10 +4487,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
SUpdateTableTagValMsg* pUpdateMsg = (SUpdateTableTagValMsg*) pCmd->payload;
pUpdateMsg->head.vgId = htonl(pTableMeta->vgroupInfo.vgId);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
pUpdateMsg->tid = htonl(pTableMeta->sid);
pUpdateMsg->uid = htobe64(pTableMeta->uid);
pUpdateMsg->colId = htons(pTagsSchema->colId);
pUpdateMsg->type = pTagsSchema->type;
pUpdateMsg->bytes = htons(pTagsSchema->bytes);
pUpdateMsg->tversion = htons(pTableMeta->tversion);
pUpdateMsg->numOfTags = htons(numOfTags);
pUpdateMsg->schemaLen = htonl(schemaLen);
......
......@@ -247,7 +247,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
} else {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
if (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID ||
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
if (pCmd->command == TSDB_SQL_CONNECT) {
rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
rpcFreeCont(rpcMsg->pCont);
......@@ -260,7 +260,12 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
// get table meta query will not retry, do nothing
} else {
tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry);
// set the flag to denote that the sql string needs to be re-parsed and the submit block rebuilt with the table schema
if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
pSql->cmd.submitSchema = 1;
}
pSql->res.code = rpcMsg->code; // keep the previous error code
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
......@@ -1950,7 +1955,7 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
}
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) {
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscCacheHandle, false);
return 0;
}
......@@ -1996,7 +2001,7 @@ int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscCacheHandle, false);
}
}
......
......@@ -503,7 +503,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
}
strtolower(pSql->sqlstr, sqlstr);
tscDebugDump("%p SQL: %s", pSql, pSql->sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
tsem_init(&pSql->rspSem, 0, 0);
int32_t code = tsParseSql(pSql, true);
......
......@@ -579,9 +579,9 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
int32_t numOfCols = tscGetNumOfColumns(pTableDataBlock->pTableMeta);
for(int32_t j = 0; j < numOfCols; ++j) {
STColumn* pCol = (STColumn*) pDataBlock;
pCol->colId = pSchema[j].colId;
pCol->colId = htons(pSchema[j].colId);
pCol->type = pSchema[j].type;
pCol->bytes = pSchema[j].bytes;
pCol->bytes = htons(pSchema[j].bytes);
pCol->offset = 0;
pDataBlock += sizeof(STColumn);
......@@ -663,7 +663,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
}
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
......@@ -691,7 +691,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId,
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize);
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
......
......@@ -29,4 +29,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
......@@ -32,9 +32,6 @@ extern int32_t tscEmbedded;
#define uDebug(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLog("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }}
#define uTrace(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLog("UTL TRACE ", uDebugFlag, __VA_ARGS__); }}
#define uDebugDump(...) { if (uDebugFlag & DEBUG_DEBUG) { taosPrintLongString("UTL DEBUG ", uDebugFlag, __VA_ARGS__); }}
#define uTraceDump(...) { if (uDebugFlag & DEBUG_TRACE) { taosPrintLongString("UTL TRACE ", uDebugFlag, __VA_ARGS__); }}
#define pError(...) { taosPrintLog("APP ERROR ", 255, __VA_ARGS__); }
#define pPrint(...) { taosPrintLog("APP INFO ", 255, __VA_ARGS__); }
......
......@@ -1210,7 +1210,7 @@ void taosInitGlobalCfg() {
}
bool taosCheckGlobalCfg() {
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG) {
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag();
}
......
......@@ -75,3 +75,33 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
int64_t start = ((startTime - intervalTime) / slidingTime + 1) * slidingTime;
if (!(timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h')) {
/*
* here we revise the start time of the day according to the local time zone,
* but in case of DST, the start time of a day needs to be decided dynamically.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
start += timezone * t;
}
int64_t end = start + intervalTime - 1;
if (end < startTime) {
start += slidingTime;
}
return start;
}
......@@ -106,6 +106,12 @@ int32_t dnodeInitMgmt() {
}
}
int32_t code = vnodeInitResources();
if (code != TSDB_CODE_SUCCESS) {
dnodeCleanupMgmt();
return -1;
}
// create the queue and thread to handle the message
tsMgmtQset = taosOpenQset();
if (tsMgmtQset == NULL) {
......@@ -127,7 +133,7 @@ int32_t dnodeInitMgmt() {
pthread_attr_init(&thAttr);
pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
int32_t code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
pthread_attr_destroy(&thAttr);
if (code != 0) {
dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno));
......@@ -176,6 +182,7 @@ void dnodeCleanupMgmt() {
tsMgmtQset = NULL;
tsMgmtQueue = NULL;
vnodeCleanupResources();
}
void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
......@@ -242,8 +249,14 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
int32_t vnode = atoi(de->d_name + 5);
if (vnode == 0) continue;
vnodeList[*numOfVnodes] = vnode;
(*numOfVnodes)++;
if (*numOfVnodes >= TSDB_MAX_VNODES) {
dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES);
continue;
} else {
vnodeList[*numOfVnodes - 1] = vnode;
}
}
}
closedir(dir);
......@@ -275,13 +288,12 @@ static void *dnodeOpenVnode(void *param) {
}
static int32_t dnodeOpenVnodes() {
int32_t *vnodeList = calloc(TSDB_MAX_VNODES, sizeof(int32_t));
int32_t numOfVnodes;
int32_t vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
free(vnodeList);
return status;
}
......@@ -327,7 +339,6 @@ static int32_t dnodeOpenVnodes() {
free(pThread->vnodeList);
}
free(vnodeList);
free(threads);
dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes);
......@@ -335,9 +346,9 @@ static int32_t dnodeOpenVnodes() {
}
void dnodeStartStream() {
int32_t vnodeList[TSDB_MAX_VNODES];
int32_t vnodeList[TSDB_MAX_VNODES] = {0};
int32_t numOfVnodes = 0;
int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
int32_t status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
......@@ -352,15 +363,14 @@ void dnodeStartStream() {
}
static void dnodeCloseVnodes() {
int32_t *vnodeList = (int32_t *)malloc(sizeof(int32_t) * TSDB_MAX_VNODES);
int32_t numOfVnodes;
int32_t vnodeList[TSDB_MAX_VNODES]= {0};
int32_t numOfVnodes = 0;
int32_t status;
status = dnodeGetVnodeList(vnodeList, &numOfVnodes);
status = vnodeGetVnodeList(vnodeList, &numOfVnodes);
if (status != TSDB_CODE_SUCCESS) {
dInfo("get dnode list failed");
free(vnodeList);
return;
}
......@@ -368,7 +378,6 @@ static void dnodeCloseVnodes() {
vnodeClose(vnodeList[i]);
}
free(vnodeList);
dInfo("total vnodes:%d are all closed", numOfVnodes);
}
......@@ -391,7 +400,7 @@ static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) {
pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId);
}
void *pVnode = vnodeAccquireVnode(pCreate->cfg.vgId);
void *pVnode = vnodeAcquireVnode(pCreate->cfg.vgId);
if (pVnode != NULL) {
int32_t code = vnodeAlter(pVnode, pCreate);
vnodeRelease(pVnode);
......
......@@ -98,11 +98,7 @@ void dnodeDispatchToVnodeReadQueue(SRpcMsg *pMsg) {
pHead->vgId = htonl(pHead->vgId);
pHead->contLen = htonl(pHead->contLen);
if (pMsg->msgType == TSDB_MSG_TYPE_FETCH) {
pVnode = vnodeGetVnode(pHead->vgId);
} else {
pVnode = vnodeAccquireVnode(pHead->vgId);
}
pVnode = vnodeAcquireVnode(pHead->vgId);
if (pVnode == NULL) {
leftLen -= pHead->contLen;
......@@ -179,24 +175,19 @@ void dnodeFreeVnodeRqueue(void *rqueue) {
// dynamically adjust the number of threads
}
static void dnodeContinueExecuteQuery(void* pVnode, void* qhandle, SReadMsg *pMsg) {
void dnodePutItemIntoReadQueue(void *pVnode, void *qhandle) {
SReadMsg *pRead = (SReadMsg *)taosAllocateQitem(sizeof(SReadMsg));
pRead->rpcMsg = pMsg->rpcMsg;
pRead->pCont = qhandle;
pRead->contLen = 0;
pRead->rpcMsg.msgType = TSDB_MSG_TYPE_QUERY;
pRead->pCont = qhandle;
pRead->contLen = 0;
assert(pVnode != NULL);
taos_queue queue = vnodeAcquireRqueue(pVnode);
taos_queue queue = vnodeGetRqueue(pVnode);
taosWriteQitem(queue, TAOS_QTYPE_RPC, pRead);
taosWriteQitem(queue, TAOS_QTYPE_QUERY, pRead);
}
void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
if (code == TSDB_CODE_VND_ACTION_IN_PROGRESS) return;
if (code == TSDB_CODE_VND_ACTION_NEED_REPROCESSED) {
dnodeContinueExecuteQuery(pVnode, pRead->rspRet.qhandle, pRead);
code = TSDB_CODE_SUCCESS;
}
SRpcMsg rpcRsp = {
.handle = pRead->rpcMsg.handle,
.pCont = pRead->rspRet.rsp,
......@@ -206,6 +197,12 @@ void dnodeSendRpcReadRsp(void *pVnode, SReadMsg *pRead, int32_t code) {
rpcSendResponse(&rpcRsp);
rpcFreeCont(pRead->rpcMsg.pCont);
vnodeRelease(pVnode);
}
void dnodeDispatchNonRspMsg(void *pVnode, SReadMsg *pRead, int32_t code) {
vnodeRelease(pVnode);
return;
}
static void *dnodeProcessReadQueue(void *param) {
......@@ -219,9 +216,16 @@ static void *dnodeProcessReadQueue(void *param) {
break;
}
dDebug("%p, msg:%s will be processed in vread queue", pReadMsg->rpcMsg.ahandle, taosMsg[pReadMsg->rpcMsg.msgType]);
dDebug("%p, msg:%s will be processed in vread queue, qtype:%d", pReadMsg->rpcMsg.ahandle,
taosMsg[pReadMsg->rpcMsg.msgType], type);
int32_t code = vnodeProcessRead(pVnode, pReadMsg);
dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
if (type == TAOS_QTYPE_RPC) {
dnodeSendRpcReadRsp(pVnode, pReadMsg, code);
} else {
dnodeDispatchNonRspMsg(pVnode, pReadMsg, code);
}
taosFreeQitem(pReadMsg);
}
......
......@@ -210,6 +210,7 @@ static void *dnodeProcessWriteQueue(void *param) {
int32_t numOfMsgs;
int type;
void *pVnode, *item;
SRspRet *pRspRet;
dDebug("write worker:%d is running", pWorker->workerId);
......@@ -222,9 +223,11 @@ static void *dnodeProcessWriteQueue(void *param) {
for (int32_t i = 0; i < numOfMsgs; ++i) {
pWrite = NULL;
pRspRet = NULL;
taosGetQitem(pWorker->qall, &type, &item);
if (type == TAOS_QTYPE_RPC) {
pWrite = (SWriteMsg *)item;
pRspRet = &pWrite->rspRet;
pHead = (SWalHead *)(pWrite->pCont - sizeof(SWalHead));
pHead->msgType = pWrite->rpcMsg.msgType;
pHead->version = 0;
......@@ -234,7 +237,7 @@ static void *dnodeProcessWriteQueue(void *param) {
pHead = (SWalHead *)item;
}
int32_t code = vnodeProcessWrite(pVnode, type, pHead, item);
int32_t code = vnodeProcessWrite(pVnode, type, pHead, pRspRet);
if (pWrite) pWrite->rpcMsg.code = code;
}
......@@ -247,6 +250,11 @@ static void *dnodeProcessWriteQueue(void *param) {
if (type == TAOS_QTYPE_RPC) {
pWrite = (SWriteMsg *)item;
dnodeSendRpcVnodeWriteRsp(pVnode, item, pWrite->rpcMsg.code);
} else if (type == TAOS_QTYPE_FWD) {
pHead = (SWalHead *)item;
vnodeConfirmForward(pVnode, pHead->version, 0);
taosFreeQitem(item);
vnodeRelease(pVnode);
} else {
taosFreeQitem(item);
vnodeRelease(pVnode);
......
......@@ -53,6 +53,7 @@ void *dnodeAllocateVnodeWqueue(void *pVnode);
void dnodeFreeVnodeWqueue(void *queue);
void *dnodeAllocateVnodeRqueue(void *pVnode);
void dnodeFreeVnodeRqueue(void *rqueue);
void dnodePutItemIntoReadQueue(void *pVnode, void *qhandle);
void dnodeSendRpcVnodeWriteRsp(void *pVnode, void *param, int32_t code);
int32_t dnodeAllocateMnodePqueue();
......
......@@ -84,6 +84,13 @@ bool qHasMoreResultsToRetrieve(qinfo_t qinfo);
*/
int32_t qKillQuery(qinfo_t qinfo);
void* qOpenQueryMgmt(int32_t vgId);
void qSetQueryMgmtClosed(void* pExecutor);
void qCleanupQueryMgmt(void* pExecutor);
void** qRegisterQInfo(void* pMgmt, void* qInfo);
void** qAcquireQInfo(void* pMgmt, void** key);
void** qReleaseQInfo(void* pMgmt, void* pQInfo, bool needFree);
#ifdef __cplusplus
}
#endif
......
......@@ -365,6 +365,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TAOS_QTYPE_FWD 1
#define TAOS_QTYPE_WAL 2
#define TAOS_QTYPE_CQ 3
#define TAOS_QTYPE_QUERY 4
typedef enum {
TSDB_SUPER_TABLE = 0, // super table
......
......@@ -180,7 +180,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, 0, 0x0506, "vnode no d
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "vnode no such file or directory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "vnode out of memory")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "vnode app error")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0214, "vnode no write auth")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_STATUS, 0, 0x0510, "vnode not in ready state")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "vnode not in synced state")
TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "vnode no write auth")
// tsdb
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_ID, 0, 0x0600, "tsdb invalid table id")
......@@ -200,6 +202,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_ACTION, 0, 0x060D, "tsdb inval
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_CREATE_TB_MSG, 0, 0x060E, "tsdb invalid create table message")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM, 0, 0x060F, "tsdb no table data in memory skiplist")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_FILE_ALREADY_EXISTS, 0, 0x0610, "tsdb file already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECONFIGURE, 0, 0x0611, "tsdb need to reconfigure table")
// query
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INVALID_QHANDLE, 0, 0x0700, "query invalid handle")
......
......@@ -203,8 +203,7 @@ typedef struct SSubmitBlk {
typedef struct SSubmitMsg {
SMsgHead header;
int32_t length;
int32_t compressed : 2;
int32_t numOfBlocks : 30;
int32_t numOfBlocks;
SSubmitBlk blocks[];
} SSubmitMsg;
......@@ -285,6 +284,8 @@ typedef struct {
int32_t tid;
int16_t tversion;
int16_t colId;
int8_t type;
int16_t bytes;
int32_t tagValLen;
int16_t numOfTags;
int32_t schemaLen;
......
......@@ -108,12 +108,14 @@ void tsdbClearTableCfg(STableCfg *config);
void* tsdbGetTableTagVal(const void* pTable, int32_t colId, int16_t type, int16_t bytes);
char* tsdbGetTableName(void *pTable);
STableId tsdbGetTableId(void *pTable);
#define TSDB_TABLEID(_table) ((STableId*) (_table))
STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg);
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg);
int tsdbDropTable(TSDB_REPO_T *pRepo, STableId tableId);
int tsdbUpdateTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
int tsdbUpdateTableTagValue(TSDB_REPO_T *repo, SUpdateTableTagValMsg *pMsg);
TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid);
void tsdbStartStream(TSDB_REPO_T *repo);
......
......@@ -49,17 +49,23 @@ int32_t vnodeAlter(void *pVnode, SMDCreateVnodeMsg *pVnodeCfg);
int32_t vnodeClose(int32_t vgId);
void vnodeRelease(void *pVnode);
void* vnodeAccquireVnode(int32_t vgId); // add refcount
void* vnodeAcquireVnode(int32_t vgId); // add refcount
void* vnodeGetVnode(int32_t vgId); // keep refcount unchanged
void* vnodeAcquireRqueue(void *);
void* vnodeGetRqueue(void *);
void* vnodeGetWqueue(int32_t vgId);
void* vnodeGetWal(void *pVnode);
int32_t vnodeProcessWrite(void *pVnode, int qtype, void *pHead, void *item);
void vnodeBuildStatusMsg(void * param);
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes);
void vnodeBuildStatusMsg(void *param);
void vnodeConfirmForward(void *param, uint64_t version, int32_t code);
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes);
int32_t vnodeInitResources();
void vnodeCleanupResources();
int32_t vnodeProcessRead(void *pVnode, SReadMsg *pReadMsg);
#ifdef __cplusplus
......
......@@ -32,6 +32,7 @@
#include <time.h>
#include <unistd.h>
#include <wordexp.h>
#include <regex.h>
#include "taos.h"
#include "tutil.h"
......@@ -54,6 +55,7 @@ static struct argp_option options[] = {
{0, 'P', "password", 0, "The password to use when connecting to the server. Default is 'taosdata'.", 3},
{0, 'd', "database", 0, "Destination database. Default is 'test'.", 3},
{0, 'm', "table_prefix", 0, "Table prefix name. Default is 't'.", 3},
{0, 's', "sql file", 0, "The select sql file.", 3},
{0, 'M', 0, 0, "Use metric flag.", 13},
{0, 'o', "outputfile", 0, "Direct output to the named file. Default is './output.txt'.", 14},
{0, 'q', "query_mode", 0, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC.", 6},
......@@ -79,6 +81,7 @@ typedef struct DemoArguments {
char *password;
char *database;
char *tb_prefix;
char *sqlFile;
bool use_metric;
bool insert_only;
char *output_file;
......@@ -120,6 +123,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
case 'o':
arguments->output_file = arg;
break;
case 's':
arguments->sqlFile = arg;
break;
case 'q':
arguments->mode = atoi(arg);
break;
......@@ -179,10 +185,10 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
arguments->tb_prefix = arg;
break;
case 'M':
arguments->use_metric = true;
arguments->use_metric = false;
break;
case 'x':
arguments->insert_only = true;
arguments->insert_only = false;
break;
case 'c':
if (wordexp(arg, &full_path, 0) != 0) {
......@@ -253,6 +259,9 @@ typedef struct {
int data_of_rate;
int64_t start_time;
bool do_aggreFunc;
char* cols;
bool use_metric;
sem_t mutex_sem;
int notFinished;
......@@ -305,6 +314,8 @@ void rand_string(char *str, int size);
double getCurrentTime();
void callBack(void *param, TAOS_RES *res, int code);
void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass);
void querySqlFile(TAOS* taos, char* sqlFile);
int main(int argc, char *argv[]) {
SDemoArguments arguments = { NULL, // host
......@@ -313,6 +324,7 @@ int main(int argc, char *argv[]) {
"taosdata", // password
"test", // database
"t", // tb_prefix
NULL,
false, // use_metric
false, // insert_only
"./output.txt", // output_file
......@@ -361,7 +373,7 @@ int main(int argc, char *argv[]) {
abort();
#endif
}
enum MODE query_mode = arguments.mode;
char *ip_addr = arguments.host;
uint16_t port = arguments.port;
......@@ -385,6 +397,13 @@ int main(int argc, char *argv[]) {
char dataString[STRING_LEN];
bool do_aggreFunc = true;
if (NULL != arguments.sqlFile) {
TAOS* qtaos = taos_connect(ip_addr, user, pass, db_name, port);
querySqlFile(qtaos, arguments.sqlFile);
taos_close(qtaos);
return 0;
}
memset(dataString, 0, STRING_LEN);
int len = 0;
......@@ -495,47 +514,19 @@ int main(int argc, char *argv[]) {
len += snprintf(cols + len, STRING_LEN - len, ",f%d %s(%d))", colIndex + 1, data_type[colIndex % count_data_type], len_of_binary);
}
if (!use_metric) {
/* Create all the tables; */
printf("Creating %d table(s)......\n", ntables);
for (int i = 0; i < ntables; i++) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", db_name, tb_prefix, i, cols);
queryDB(taos, command);
}
printf("Table(s) created!\n");
taos_close(taos);
} else {
if (use_metric) {
/* Create metric table */
printf("Creating meters super table...\n");
snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
queryDB(taos, command);
printf("meters created!\n");
/* Create all the tables; */
printf("Creating %d table(s)......\n", ntables);
for (int i = 0; i < ntables; i++) {
int j;
if (i % 10 == 0) {
j = 10;
} else {
j = i % 10;
}
if (j % 2 == 0) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "shanghai");
} else {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", db_name, tb_prefix, i, db_name, j, "beijing");
}
queryDB(taos, command);
}
printf("Table(s) created!\n");
taos_close(taos);
}
/* Wait for table to create */
multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);
/* Insert data */
double ts = getCurrentTime();
printf("Inserting data......\n");
......@@ -685,6 +676,198 @@ int main(int argc, char *argv[]) {
return 0;
}
#define MAX_SQL_SIZE 65536
void selectSql(TAOS* taos, char* sqlcmd)
{
TAOS_RES *pSql = taos_query(taos, sqlcmd);
int32_t code = taos_errno(pSql);
if (code != 0) {
printf("Failed to sqlcmd:%s, reason:%s\n", sqlcmd, taos_errstr(pSql));
taos_free_result(pSql);
exit(1);
}
int count = 0;
while (taos_fetch_row(pSql) != NULL) {
count++;
}
taos_free_result(pSql);
return;
}
/* Function to do regular expression check */
static int regexMatch(const char *s, const char *reg, int cflags) {
regex_t regex;
char msgbuf[100] = {0};
/* Compile regular expression */
if (regcomp(&regex, reg, cflags) != 0) {
printf("Fail to compile regex\n");
exit(-1);
}
/* Execute regular expression */
int reti = regexec(&regex, s, 0, NULL, 0);
if (!reti) {
regfree(&regex);
return 1;
} else if (reti == REG_NOMATCH) {
regfree(&regex);
return 0;
} else {
regerror(reti, &regex, msgbuf, sizeof(msgbuf));
printf("Regex match failed: %s\n", msgbuf);
regfree(&regex);
exit(-1);
}
return 0;
}
static int isCommentLine(char *line) {
if (line == NULL) return 1;
return regexMatch(line, "^\\s*#.*", REG_EXTENDED);
}
void querySqlFile(TAOS* taos, char* sqlFile)
{
FILE *fp = fopen(sqlFile, "r");
if (fp == NULL) {
printf("failed to open file %s, reason:%s\n", sqlFile, strerror(errno));
exit(-1);
}
int read_len = 0;
char * cmd = calloc(1, MAX_SQL_SIZE);
size_t cmd_len = 0;
char * line = NULL;
size_t line_len = 0;
double t = getCurrentTime();
while ((read_len = getline(&line, &line_len, fp)) != -1) {
if (read_len >= MAX_SQL_SIZE) continue;
line[--read_len] = '\0';
if (read_len == 0 || isCommentLine(line)) { // line starts with #
continue;
}
if (line[read_len - 1] == '\\') {
line[read_len - 1] = ' ';
memcpy(cmd + cmd_len, line, read_len);
cmd_len += read_len;
continue;
}
memcpy(cmd + cmd_len, line, read_len);
selectSql(taos, cmd);
memset(cmd, 0, MAX_SQL_SIZE);
cmd_len = 0;
}
t = getCurrentTime() - t;
printf("run %s took %.6f second(s)\n\n", sqlFile, t);
free(cmd);
if (line) free(line);
fclose(fp);
return;
}
void * createTable(void *sarg)
{
char command[BUFFER_SIZE] = "\0";
info *winfo = (info *)sarg;
if (!winfo->use_metric) {
/* Create all the tables; */
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
queryDB(winfo->taos, command);
}
taos_close(winfo->taos);
} else {
/* Create all the tables; */
printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);
for (int i = winfo->start_table_id; i <= winfo->end_table_id; i++) {
int j;
if (i % 10 == 0) {
j = 10;
} else {
j = i % 10;
}
if (j % 2 == 0) {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "shanghai");
} else {
snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d using %s.meters tags (%d,\"%s\");", winfo->db_name, winfo->tb_prefix, i, winfo->db_name, j, "beijing");
}
queryDB(winfo->taos, command);
}
taos_close(winfo->taos);
}
return NULL;
}
void multiThreadCreateTable(char* cols, bool use_metric, int threads, int ntables, char* db_name, char* tb_prefix, char *ip_addr, uint16_t port, char *user, char *pass) {
double ts = getCurrentTime();
printf("create table......\n");
pthread_t *pids = malloc(threads * sizeof(pthread_t));
info *infos = malloc(threads * sizeof(info));
int a = ntables / threads;
if (a < 1) {
threads = ntables;
a = 1;
}
int b = 0;
if (threads != 0)
b = ntables % threads;
int last = 0;
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
t_info->threadID = i;
tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE);
tstrncpy(t_info->tb_prefix, tb_prefix, MAX_TB_NAME_SIZE);
t_info->taos = taos_connect(ip_addr, user, pass, db_name, port);
t_info->start_table_id = last;
t_info->end_table_id = i < b ? last + a : last + a - 1;
last = t_info->end_table_id + 1;
t_info->use_metric = use_metric;
t_info->cols = cols;
pthread_create(pids + i, NULL, createTable, t_info);
}
for (int i = 0; i < threads; i++) {
pthread_join(pids[i], NULL);
}
double t = getCurrentTime() - ts;
printf("Spent %.4f seconds to create %d tables with %d connections\n", t, ntables, threads);
for (int i = 0; i < threads; i++) {
info *t_info = infos + i;
taos_close(t_info->taos);
sem_destroy(&(t_info->mutex_sem));
sem_destroy(&(t_info->lock_sem));
}
free(pids);
free(infos);
return ;
}
void *readTable(void *sarg) {
info *rinfo = (info *)sarg;
TAOS *taos = rinfo->taos;
......
......@@ -13,4 +13,6 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
ADD_EXECUTABLE(taosmigrate ${SRC})
TARGET_LINK_LIBRARIES(taosmigrate common tutil cJson)
ENDIF ()
\ No newline at end of file
ENDIF ()
SET_SOURCE_FILES_PROPERTIES(./taosmigrate.c PROPERTIES COMPILE_FLAGS -w)
......@@ -68,7 +68,7 @@ int32_t mnodeInitProfile() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_STREAM, mnodeProcessKillStreamMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_KILL_CONN, mnodeProcessKillConnectionMsg);
tsMnodeConnCache = taosCacheInitWithCb(TSDB_DATA_TYPE_INT, CONN_CHECK_TIME, false, mnodeFreeConn, "conn");
tsMnodeConnCache = taosCacheInit(TSDB_DATA_TYPE_INT, CONN_CHECK_TIME, false, mnodeFreeConn, "conn");
return 0;
}
......
......@@ -65,7 +65,7 @@ int32_t mnodeInitShow() {
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);
tsMnodeShowCache = taosCacheInitWithCb(TSDB_DATA_TYPE_INT, 5, false, mnodeFreeShowObj, "show");
tsMnodeShowCache = taosCacheInit(TSDB_DATA_TYPE_INT, 5, false, mnodeFreeShowObj, "show");
return 0;
}
......
......@@ -165,6 +165,8 @@ static int32_t mnodeVgroupActionUpdate(SSdbOper *pOper) {
}
mnodeDecDnodeRef(pDnode);
}
free(pNew);
}
mnodeVgroupUpdateIdPool(pVgroup);
......
......@@ -86,9 +86,28 @@ extern "C" {
} \
}
#ifdef TAOS_RANDOM_NETWORK_FAIL
ssize_t taos_send_random_fail(int sockfd, const void *buf, size_t len, int flags);
ssize_t taos_sendto_random_fail(int sockfd, const void *buf, size_t len, int flags,
const struct sockaddr *dest_addr, socklen_t addrlen);
ssize_t taos_read_random_fail(int fd, void *buf, size_t count);
ssize_t taos_write_random_fail(int fd, const void *buf, size_t count);
#define send(sockfd, buf, len, flags) taos_send_random_fail(sockfd, buf, len, flags)
#define sendto(sockfd, buf, len, flags, dest_addr, addrlen) \
taos_sendto_random_fail(sockfd, buf, len, flags, dest_addr, addrlen)
#define taosWriteSocket(fd, buf, len) taos_write_random_fail(fd, buf, len)
#define taosReadSocket(fd, buf, len) taos_read_random_fail(fd, buf, len)
#else
#define taosWriteSocket(fd, buf, len) write(fd, buf, len)
#define taosReadSocket(fd, buf, len) read(fd, buf, len)
#endif /* TAOS_RANDOM_NETWORK_FAIL */
#define atomic_load_8(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_16(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
#define atomic_load_32(ptr) __atomic_load_n((ptr), __ATOMIC_SEQ_CST)
......
......@@ -270,3 +270,49 @@ int tSystem(const char * cmd)
}
}
#ifdef TAOS_RANDOM_NETWORK_FAIL
#define RANDOM_NETWORK_FAIL_FACTOR 20
ssize_t taos_send_random_fail(int sockfd, const void *buf, size_t len, int flags)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = ECONNRESET;
return -1;
}
return send(sockfd, buf, len, flags);
}
ssize_t taos_sendto_random_fail(int sockfd, const void *buf, size_t len, int flags,
const struct sockaddr *dest_addr, socklen_t addrlen)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = ECONNRESET;
return -1;
}
return sendto(sockfd, buf, len, flags, dest_addr, addrlen);
}
ssize_t taos_read_random_fail(int fd, void *buf, size_t count)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = ECONNRESET;
return -1;
}
return read(fd, buf, count);
}
ssize_t taos_write_random_fail(int fd, const void *buf, size_t count)
{
if (rand() % RANDOM_NETWORK_FAIL_FACTOR == 0) {
errno = EINTR;
return -1;
}
return write(fd, buf, count);
}
#endif /* TAOS_RANDOM_NETWORK_FAIL */
......@@ -160,7 +160,7 @@ static void taosGetSystemTimezone() {
/* load time zone string from /etc/timezone */
FILE *f = fopen("/etc/timezone", "r");
char buf[65] = {0};
char buf[68] = {0};
if (f != NULL) {
int len = fread(buf, 64, 1, f);
if(len < 64 && ferror(f)) {
......@@ -170,18 +170,17 @@ static void taosGetSystemTimezone() {
}
fclose(f);
}
char *lineEnd = strstr(buf, "\n");
if (lineEnd != NULL) {
*lineEnd = 0;
}
char *lineEnd = strstr(buf, "\n");
if (lineEnd != NULL) {
*lineEnd = 0;
}
// for CentOS system, /etc/timezone does not exist. Ignore the TZ environment variables
if (strlen(buf) > 0) {
setenv("TZ", buf, 1);
// for CentOS system, /etc/timezone does not exist. Ignore the TZ environment variables
if (strlen(buf) > 0) {
setenv("TZ", buf, 1);
}
}
// get and set default timezone
tzset();
......
......@@ -26,8 +26,6 @@ extern int32_t httpDebugFlag;
#define httpInfo(...) { if (httpDebugFlag & DEBUG_INFO) { taosPrintLog("HTP INFO ", 255, __VA_ARGS__); }}
#define httpDebug(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLog("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }}
#define httpTrace(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLog("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#define httpDebugDump(...) { if (httpDebugFlag & DEBUG_DEBUG) { taosPrintLongString("HTP DEBUG ", httpDebugFlag, __VA_ARGS__); }}
#define httpTraceDump(...) { if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#define httpTraceL(...){ if (httpDebugFlag & DEBUG_TRACE) { taosPrintLongString("HTP TRACE ", httpDebugFlag, __VA_ARGS__); }}
#endif
......@@ -58,7 +58,7 @@ static void httpDestroyContext(void *data) {
}
bool httpInitContexts() {
tsHttpServer.contextCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BIGINT, 2, false, httpDestroyContext, "restc");
tsHttpServer.contextCache = taosCacheInit(TSDB_DATA_TYPE_BIGINT, 2, false, httpDestroyContext, "restc");
if (tsHttpServer.contextCache == NULL) {
httpError("failed to init context cache");
return false;
......
......@@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
return true;
}
httpTraceDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
pContext->parser.buffer);
httpTraceL("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
pContext->parser.buffer);
if (!httpGetHttpMethod(pContext)) {
return false;
......
......@@ -108,7 +108,7 @@ bool httpReadDataImp(HttpContext *pContext) {
static bool httpDecompressData(HttpContext *pContext) {
if (pContext->contentEncoding != HTTP_COMPRESS_GZIP) {
httpTraceDump("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
httpTraceL("context:%p, fd:%d, ip:%s, content:%s", pContext, pContext->fd, pContext->ipstr, pContext->parser.data.pos);
return true;
}
......@@ -124,8 +124,8 @@ static bool httpDecompressData(HttpContext *pContext) {
if (ret == 0) {
memcpy(pContext->parser.data.pos, decompressBuf, decompressBufLen);
pContext->parser.data.pos[decompressBufLen] = 0;
httpTraceDump("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s",
pContext, pContext->fd, pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
httpTraceL("context:%p, fd:%d, ip:%s, rawSize:%d, decompressSize:%d, content:%s", pContext, pContext->fd,
pContext->ipstr, pContext->parser.data.len, decompressBufLen, decompressBuf);
pContext->parser.data.len = decompressBufLen;
} else {
httpError("context:%p, fd:%d, ip:%s, failed to decompress data, rawSize:%d, error:%d",
......
......@@ -115,7 +115,7 @@ void httpCleanUpSessions() {
}
bool httpInitSessions() {
tsHttpServer.sessionCache = taosCacheInitWithCb(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession, "rests");
tsHttpServer.sessionCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, 5, false, httpDestroySession, "rests");
if (tsHttpServer.sessionCache == NULL) {
httpError("failed to init session cache");
return false;
......
......@@ -166,8 +166,8 @@ void httpProcessMultiSql(HttpContext *pContext) {
HttpSqlCmd *cmd = multiCmds->cmds + multiCmds->pos;
char *sql = httpGetCmdsString(pContext, cmd->sql);
httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd,
pContext->ipstr, pContext->user, multiCmds->pos, sql);
httpTraceL("context:%p, fd:%d, ip:%s, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd,
pContext->ipstr, pContext->user, multiCmds->pos, sql);
taosNotePrintHttp(sql);
taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext);
}
......@@ -306,8 +306,8 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) {
return;
}
httpTraceDump("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr,
pContext->user, sql);
httpTraceL("context:%p, fd:%d, ip:%s, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->ipstr,
pContext->user, sql);
taosNotePrintHttp(sql);
taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext);
}
......
......@@ -35,9 +35,6 @@
#define monitorDebug(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLog("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTrace(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLog("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorDebugDump(...) { if (monitorDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MON DEBUG ", monitorDebugFlag, __VA_ARGS__); }}
#define monitorTraceDump(...) { if (monitorDebugFlag & DEBUG_TRACE) { taosPrintLongString("MON TRACE ", monitorDebugFlag, __VA_ARGS__); }}
#define SQL_LENGTH 1024
#define LOG_LEN_STR 100
#define IP_LEN_STR 18
......
......@@ -27,7 +27,4 @@ extern int32_t mqttDebugFlag;
#define mqttDebug(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLog("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttTrace(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLog("MQT TRACE ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttDebugDump(...) { if (mqttDebugFlag & DEBUG_DEBUG) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#define mqttTraceDump(...) { if (mqttDebugFlag & DEBUG_TRACE) { taosPrintLongString("MQT DEBUG ", mqttDebugFlag, __VA_ARGS__); }}
#endif
......@@ -111,7 +111,7 @@ void mqttStopSystem() {
}
void mqttCleanUpSystem() {
mqttInfo("starting to clean up mqtt");
mqttInfo("starting to cleanup mqtt");
free(recntStatus.user_name);
free(recntStatus.password);
free(recntStatus.hostname);
......
......@@ -154,6 +154,7 @@ typedef struct SQuery {
} SQuery;
typedef struct SQueryRuntimeEnv {
jmp_buf env;
SResultInfo* resultInfo; // todo refactor to merge with SWindowResInfo
SQuery* pQuery;
SQLFunctionCtx* pCtx;
......@@ -169,6 +170,8 @@ typedef struct SQueryRuntimeEnv {
void* pSecQueryHandle; // another thread for
bool stableQuery; // super table query or not
bool topBotQuery; // false
bool groupbyNormalCol; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
int32_t prevGroupId; // previous executed group id
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
} SQueryRuntimeEnv;
......@@ -197,8 +200,10 @@ typedef struct SQInfo {
*/
int32_t tableIndex;
int32_t numOfGroupResultPages;
_qinfo_free_fn_t freeFn;
jmp_buf env;
_qinfo_free_fn_t freeFn; //todo remove it
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
} SQInfo;
#endif // TDENGINE_QUERYEXECUTOR_H
......@@ -60,8 +60,6 @@ typedef struct SPoint {
void * val;
} SPoint;
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision);
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType,
SFillColInfo* pFillCol);
......
This diff has been collapsed.
......@@ -32,7 +32,6 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
pWindowResInfo->threshold = threshold;
pWindowResInfo->type = type;
_hash_fn_t fn = taosGetDefaultHashFunction(type);
pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
......@@ -54,7 +53,8 @@ void destroyTimeWindowRes(SWindowResult *pWindowRes, int32_t nOutputCols) {
if (pWindowRes == NULL) {
return;
}
// TODO opt malloc strategy
for (int32_t i = 0; i < nOutputCols; ++i) {
free(pWindowRes->resultInfo[i].interResultBuf);
}
......@@ -180,19 +180,34 @@ void closeAllTimeWindow(SWindowResInfo *pWindowResInfo) {
/*
* remove the results that are not the FIRST time window that spreads beyond the
* the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time
* last qualified timestamp, in the case of a sliding query where the sliding time is not equal to the interval time.
* NOTE: remove redundant windows only when the result-set order equals the traversal order
*/
void removeRedundantWindow(SWindowResInfo *pWindowResInfo, TSKEY lastKey, int32_t order) {
assert(pWindowResInfo->size >= 0 && pWindowResInfo->capacity >= pWindowResInfo->size);
if (pWindowResInfo->size <= 1) {
return;
}
// get the result order
int32_t resultOrder = (pWindowResInfo->pResult[0].window.skey < pWindowResInfo->pResult[1].window.skey)?
TSDB_ORDER_ASC:TSDB_ORDER_DESC;
if (order != resultOrder) {
return;
}
int32_t i = 0;
while (i < pWindowResInfo->size &&
((pWindowResInfo->pResult[i].window.ekey < lastKey && order == QUERY_ASC_FORWARD_STEP) ||
(pWindowResInfo->pResult[i].window.skey > lastKey && order == QUERY_DESC_FORWARD_STEP))) {
++i;
if (order == QUERY_ASC_FORWARD_STEP) {
while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.ekey < lastKey)) {
++i;
}
} else if (order == QUERY_DESC_FORWARD_STEP) {
while (i < pWindowResInfo->size && (pWindowResInfo->pResult[i].window.skey > lastKey)) {
++i;
}
}
// assert(i < pWindowResInfo->size);
if (i < pWindowResInfo->size) {
pWindowResInfo->size = (i + 1);
}
......
......@@ -118,7 +118,7 @@ static bool tExtMemBufferAlloc(tExtMemBuffer *pMemBuffer) {
* To flush data to disk to accommodate more data
*/
if (pMemBuffer->numOfInMemPages > 0 && pMemBuffer->numOfInMemPages == pMemBuffer->inMemCapacity) {
if (!tExtMemBufferFlush(pMemBuffer)) {
if (tExtMemBufferFlush(pMemBuffer) != 0) {
return false;
}
}
......@@ -268,6 +268,7 @@ int32_t tExtMemBufferFlush(tExtMemBuffer *pMemBuffer) {
size_t retVal = fwrite((char *)&(first->item), pMemBuffer->pageSize, 1, pMemBuffer->file);
if (retVal <= 0) { // failed to write to buffer, may be not enough space
ret = TAOS_SYSTEM_ERROR(errno);
return ret;
}
pMemBuffer->fileMeta.numOfElemsInFile += first->item.num;
......
......@@ -22,41 +22,6 @@
#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC)
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
}
if (timeUnit == 'a' || timeUnit == 'm' || timeUnit == 's' || timeUnit == 'h') {
return (startTime / slidingTime) * slidingTime;
} else {
/*
* here we revised the start time of day according to the local time zone,
* but in case of DST, the start time of one day need to be dynamically decided.
*
* TODO dynamically decide the start time of a day, move to common module
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
int64_t t = (precision == TSDB_TIME_PRECISION_MILLI) ? MILLISECOND_PER_SECOND : MILLISECOND_PER_SECOND * 1000L;
int64_t revStartime = (startTime / slidingTime) * slidingTime + timezone * t;
int64_t revEndtime = revStartime + slidingTime - 1;
if (revEndtime < startTime) {
revStartime += slidingTime;
}
return revStartime;
}
}
SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_t capacity, int32_t numOfCols,
int64_t slidingTime, int8_t slidingUnit, int8_t precision, int32_t fillType, SFillColInfo* pFillCol) {
if (fillType == TSDB_FILL_NONE) {
......@@ -128,7 +93,7 @@ static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterva
if (order == TSDB_ORDER_ASC) {
return ekey;
} else {
return taosGetIntervalStartTimestamp(ekey, timeInterval, slidingTimeUnit, precision);
return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
}
}
......
......@@ -31,9 +31,7 @@ extern int32_t tscEmbedded;
#define tInfo(...) { if (rpcDebugFlag & DEBUG_INFO) { taosPrintLog("RPC INFO ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }}
#define tDebug(...) { if (rpcDebugFlag & DEBUG_DEBUG) { taosPrintLog("RPC DEBUG ", rpcDebugFlag, __VA_ARGS__); }}
#define tTrace(...) { if (rpcDebugFlag & DEBUG_TRACE) { taosPrintLog("RPC TRACE ", rpcDebugFlag, __VA_ARGS__); }}
#define tDebugDump(x, y) { if (rpcDebugFlag & DEBUG_DEBUG) { taosDumpData((unsigned char *)x, y); }}
#define tTraceDump(x, y) { if (rpcDebugFlag & DEBUG_TRACE) { taosDumpData((unsigned char *)x, y); }}
#define tDump(x, y) { if (rpcDebugFlag & DEBUG_DUMP) { taosDumpData((unsigned char *)x, y); }}
#ifdef __cplusplus
}
......
......@@ -73,6 +73,7 @@ typedef struct {
SRpcInfo *pRpc; // associated SRpcInfo
SRpcIpSet ipSet; // ip list provided by app
void *ahandle; // handle provided by app
void *signature; // for validation
struct SRpcConn *pConn; // pConn allocated
char msgType; // message type
uint8_t *pCont; // content provided by app
......@@ -361,6 +362,7 @@ void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen);
pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
pContext->ahandle = pMsg->ahandle;
pContext->signature = pContext;
pContext->pRpc = (SRpcInfo *)shandle;
pContext->ipSet = *pIpSet;
pContext->contLen = contLen;
......@@ -527,13 +529,16 @@ int rpcReportProgress(void *handle, char *pCont, int contLen) {
return code;
}
/* todo: cancel process may have race condition, pContext may have been released
just before app calls the rpcCancelRequest */
void rpcCancelRequest(void *handle) {
SRpcReqContext *pContext = handle;
// signature is used to check if pContext is freed.
// pContext may have been released just before app calls the rpcCancelRequest
if (pContext->signature != pContext) return;
if (pContext->pConn) {
tDebug("%s, app trys to cancel request", pContext->pConn->info);
pContext->pConn->pReqMsg = NULL;
rpcCloseConn(pContext->pConn);
pContext->pConn = NULL;
rpcFreeCont(pContext->pCont);
......@@ -598,8 +603,13 @@ static void rpcReleaseConn(SRpcConn *pConn) {
rpcFreeMsg(pConn->pRspMsg); // it may have a response msg saved, but not request msg
pConn->pRspMsg = NULL;
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg);
}
// if server has ever reported progress, free content
if (pConn->pReqMsg) rpcFreeCont(pConn->pReqMsg); // do not use rpcFreeMsg
} else {
// if there is an outgoing message, free it
if (pConn->outType && pConn->pReqMsg)
rpcFreeMsg(pConn->pReqMsg);
}
// memset could not be used, since lockeBy can not be reset
pConn->inType = 0;
......@@ -955,6 +965,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) {
if (pConn->outType) {
SRpcReqContext *pContext = pConn->pContext;
pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl);
}
......@@ -969,7 +980,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
SRpcInfo *pRpc = (SRpcInfo *)pRecv->shandle;
SRpcConn *pConn = (SRpcConn *)pRecv->thandle;
tTraceDump(pRecv->msg, pRecv->msgLen);
tDump(pRecv->msg, pRecv->msgLen);
// underlying UDP layer does not know it is server or client
pRecv->connType = pRecv->connType | pRpc->connType;
......@@ -1005,6 +1016,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
static void rpcNotifyClient(SRpcReqContext *pContext, SRpcMsg *pMsg) {
SRpcInfo *pRpc = pContext->pRpc;
pContext->signature = NULL;
pContext->pConn = NULL;
if (pContext->pRsp) {
// for synchronous API
......@@ -1056,6 +1068,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
SRpcReqContext *pContext = pConn->pContext;
rpcMsg.handle = pContext;
pConn->pContext = NULL;
pConn->pReqMsg = NULL;
// for UDP, port may be changed by server, the port in ipSet shall be used for cache
if (pHead->code != TSDB_CODE_RPC_TOO_SLOW) {
......@@ -1242,7 +1255,7 @@ static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
tError("%s, failed to send, msgLen:%d written:%d, reason:%s", pConn->info, msgLen, writtenLen, strerror(errno));
}
tTraceDump(msg, msgLen);
tDump(msg, msgLen);
}
static void rpcProcessConnError(void *param, void *id) {
......@@ -1292,6 +1305,7 @@ static void rpcProcessRetryTimer(void *param, void *tmrId) {
tDebug("%s, failed to send msg:%s to %s:%hu", pConn->info, taosMsg[pConn->outType], pConn->peerFqdn, pConn->peerPort);
if (pConn->pContext) {
pConn->pContext->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
pConn->pReqMsg = NULL;
taosTmrStart(rpcProcessConnError, 0, pConn->pContext, pRpc->tmrCtrl);
rpcReleaseConn(pConn);
}
......
......@@ -47,9 +47,9 @@ extern int tsdbDebugFlag;
// Definitions
// ------------------ tsdbMeta.c
typedef struct STable {
STableId tableId;
ETableType type;
tstr* name; // NOTE: there a flexible string here
STableId tableId;
uint64_t suid;
struct STable* pSuper; // super table pointer
uint8_t numOfSchemas;
......@@ -298,16 +298,71 @@ STsdbMeta* tsdbNewMeta(STsdbCfg* pCfg);
void tsdbFreeMeta(STsdbMeta* pMeta);
int tsdbOpenMeta(STsdbRepo* pRepo);
int tsdbCloseMeta(STsdbRepo* pRepo);
STSchema* tsdbGetTableSchema(STable* pTable);
STable* tsdbGetTableByUid(STsdbMeta* pMeta, uint64_t uid);
STSchema* tsdbGetTableSchemaByVersion(STable* pTable, int16_t version);
STSchema* tsdbGetTableTagSchema(STable* pTable);
int tsdbUpdateTable(STsdbRepo* pRepo, STable* pTable, STableCfg* pCfg);
int tsdbWLockRepoMeta(STsdbRepo* pRepo);
int tsdbRLockRepoMeta(STsdbRepo* pRepo);
int tsdbUnlockRepoMeta(STsdbRepo* pRepo);
void tsdbRefTable(STable* pTable);
void tsdbUnRefTable(STable* pTable);
void tsdbUpdateTableSchema(STsdbRepo* pRepo, STable* pTable, STSchema* pSchema, bool insertAct);
static FORCE_INLINE int tsdbCompareSchemaVersion(const void *key1, const void *key2) {
if (*(int16_t *)key1 < schemaVersion(*(STSchema **)key2)) {
return -1;
} else if (*(int16_t *)key1 > schemaVersion(*(STSchema **)key2)) {
return 1;
} else {
return 0;
}
}
static FORCE_INLINE STSchema* tsdbGetTableSchemaImpl(STable* pTable, bool lock, bool copy, int16_t version) {
STable* pDTable = (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable;
STSchema* pSchema = NULL;
STSchema* pTSchema = NULL;
if (lock) taosRLockLatch(&(pDTable->latch));
if (version < 0) { // get the latest version of schema
pTSchema = pDTable->schema[pDTable->numOfSchemas - 1];
} else { // get the schema with version
void* ptr = taosbsearch(&version, pDTable->schema, pDTable->numOfSchemas, sizeof(STSchema*),
tsdbCompareSchemaVersion, TD_EQ);
if (ptr == NULL) {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _exit;
}
pTSchema = *(STSchema**)ptr;
}
ASSERT(pTSchema != NULL);
if (copy) {
if ((pSchema = tdDupSchema(pTSchema)) == NULL) terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
} else {
pSchema = pTSchema;
}
_exit:
if (lock) taosRUnLockLatch(&(pDTable->latch));
return pSchema;
}
static FORCE_INLINE STSchema* tsdbGetTableSchema(STable* pTable) {
return tsdbGetTableSchemaImpl(pTable, false, false, -1);
}
static FORCE_INLINE STSchema *tsdbGetTableTagSchema(STable *pTable) {
if (pTable->type == TSDB_CHILD_TABLE) { // check child table first
STable *pSuper = pTable->pSuper;
if (pSuper == NULL) return NULL;
return pSuper->tagSchema;
} else if (pTable->type == TSDB_SUPER_TABLE) {
return pTable->tagSchema;
} else {
return NULL;
}
}
// ------------------ tsdbBuffer.c
STsdbBufPool* tsdbNewBufPool();
......
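The inline helpers added above fold the old tsdbGetTableSchema/tsdbGetTableSchemaByVersion lookups into a single routine. As a minimal illustrative sketch (not part of this commit; it assumes pTable is a valid STable* obtained from the meta), a caller would use it like this:

    // latest schema, no read lock, no copy (same as the tsdbGetTableSchema wrapper above)
    STSchema *pLatest = tsdbGetTableSchemaImpl(pTable, false, false, -1);
    // schema for one specific version, taking the read latch while searching
    STSchema *pV3 = tsdbGetTableSchemaImpl(pTable, true, false, 3);
    if (pV3 == NULL && terrno == TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION) {
      // the requested schema version does not exist for this table
    }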
......@@ -28,6 +28,7 @@
#include "tsdbMain.h"
#include "tutil.h"
#include "ttime.h"
#include "tfile.h"
const char *tsdbFileSuffix[] = {".head", ".data", ".last", "", ".h", ".l"};
......
......@@ -41,9 +41,9 @@ typedef struct {
} SSubmitBlkIter;
typedef struct {
int32_t totalLen;
int32_t len;
SSubmitBlk *pBlock;
int32_t totalLen;
int32_t len;
void * pMsg;
} SSubmitMsgIter;
static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg);
......@@ -56,7 +56,7 @@ static STsdbRepo * tsdbNewRepo(char *rootDir, STsdbAppH *pAppH, STsdbCfg *pCfg);
static void tsdbFreeRepo(STsdbRepo *pRepo);
static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter);
static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY now, int32_t *affectedrows);
static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter);
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock);
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter);
static int tsdbRestoreInfo(STsdbRepo *pRepo);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
......@@ -68,6 +68,7 @@ static int keyFGroupCompFunc(const void *key, const void *fgroup);
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
static void * tsdbDecodeCfg(void *buf, STsdbCfg *pCfg);
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable);
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg);
// Function declaration
int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
......@@ -164,6 +165,13 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *
STsdbRepo * pRepo = (STsdbRepo *)repo;
SSubmitMsgIter msgIter = {0};
if (tsdbScanAndConvertSubmitMsg(pRepo, pMsg) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_RECONFIGURE) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
}
return -1;
}
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) {
tsdbError("vgId:%d failed to insert data since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
......@@ -173,12 +181,14 @@ int32_t tsdbInsertData(TSDB_REPO_T *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *
int32_t affectedrows = 0;
TSKEY now = taosGetTimestamp(pRepo->config.precision);
while ((pBlock = tsdbGetSubmitMsgNext(&msgIter)) != NULL) {
while (true) {
tsdbGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
if (tsdbInsertDataToTable(pRepo, pBlock, now, &affectedrows) < 0) {
return -1;
}
}
if (pRsp != NULL) pRsp->affectedRows = htonl(affectedrows);
return 0;
}
......@@ -263,7 +273,7 @@ void tsdbStartStream(TSDB_REPO_T *repo) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
tsdbGetTableSchema(pTable));
tsdbGetTableSchemaImpl(pTable, false, false, -1));
}
}
}
......@@ -694,17 +704,12 @@ static int tsdbInitSubmitMsgIter(SSubmitMsg *pMsg, SSubmitMsgIter *pIter) {
return -1;
}
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
pMsg->compressed = htonl(pMsg->compressed);
pIter->totalLen = pMsg->length;
pIter->len = TSDB_SUBMIT_MSG_HEAD_SIZE;
pIter->len = 0;
pIter->pMsg = pMsg;
if (pMsg->length <= TSDB_SUBMIT_MSG_HEAD_SIZE) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
return -1;
} else {
pIter->pBlock = pMsg->blocks;
}
return 0;
......@@ -714,26 +719,8 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
STable *pTable = tsdbGetTableByUid(pMeta, pBlock->uid);
if (pTable == NULL || TABLE_TID(pTable) != pBlock->tid) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
tsdbError("vgId:%d failed to insert data to table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
STable *pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
SSubmitBlkIter blkIter = {0};
SDataRow row = NULL;
......@@ -764,27 +751,23 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
return 0;
}
static SSubmitBlk *tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter) {
SSubmitBlk *pBlock = pIter->pBlock;
if (pBlock == NULL) return NULL;
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->padding = htonl(pBlock->padding);
pIter->len = pIter->len + sizeof(SSubmitBlk) + pBlock->dataLen;
if (pIter->len >= pIter->totalLen) {
pIter->pBlock = NULL;
static int tsdbGetSubmitMsgNext(SSubmitMsgIter *pIter, SSubmitBlk **pPBlock) {
if (pIter->len == 0) {
pIter->len += TSDB_SUBMIT_MSG_HEAD_SIZE;
} else {
pIter->pBlock = (SSubmitBlk *)((char *)pBlock + pBlock->dataLen + sizeof(SSubmitBlk));
SSubmitBlk *pSubmitBlk = (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
pIter->len += (sizeof(SSubmitBlk) + pSubmitBlk->dataLen + pSubmitBlk->schemaLen);
}
return pBlock;
if (pIter->len > pIter->totalLen) {
terrno = TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP;
*pPBlock = NULL;
return -1;
}
*pPBlock = (pIter->len == pIter->totalLen) ? NULL : (SSubmitBlk *)POINTER_SHIFT(pIter->pMsg, pIter->len);
return 0;
}
static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
......@@ -969,42 +952,64 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pTable) {
ASSERT(pTable != NULL);
STSchema *pSchema = tsdbGetTableSchema(pTable);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int sversion = schemaVersion(pSchema);
if (pBlock->sversion == sversion) return 0;
if (pBlock->sversion > sversion) { // need to config
tsdbDebug("vgId:%d table %s tid %d has version %d smaller than client version %d, try to config", REPO_ID(pRepo),
TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), sversion, pBlock->sversion);
if (pRepo->appH.configFunc) {
void *msg = (*pRepo->appH.configFunc)(REPO_ID(pRepo), TABLE_TID(pTable));
if (msg == NULL) {
tsdbError("vgId:%d failed to config table %s tid %d since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), tstrerror(terrno));
if (pBlock->sversion == sversion) {
return 0;
} else {
if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE) { // stream table is not allowed to change schema
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
return -1;
}
}
if (pBlock->sversion > sversion) { // may need to update table schema
if (pBlock->schemaLen > 0) {
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of date, client version %d, update...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
ASSERT(pBlock->schemaLen % sizeof(STColumn) == 0);
int numOfCols = pBlock->schemaLen / sizeof(STColumn);
STColumn *pTCol = (STColumn *)pBlock->data;
STSchemaBuilder schemaBuilder = {0};
if (tdInitTSchemaBuilder(&schemaBuilder, pBlock->sversion) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
return -1;
}
STableCfg *pTableCfg = tsdbCreateTableCfgFromMsg(msg);
if (pTableCfg == NULL) {
rpcFreeCont(msg);
return -1;
for (int i = 0; i < numOfCols; i++) {
if (tdAddColToSchema(&schemaBuilder, pTCol[i].type, htons(pTCol[i].colId), htons(pTCol[i].bytes)) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbError("vgId:%d failed to update schema of table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
}
if (tsdbUpdateTable(pRepo, (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE) ? pTable->pSuper : pTable, pTableCfg) < 0) {
tsdbError("vgId:%d failed to update table %s since %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
tstrerror(terrno));
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
STSchema *pNSchema = tdGetSchemaFromBuilder(&schemaBuilder);
if (pNSchema == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tdDestroyTSchemaBuilder(&schemaBuilder);
return -1;
}
tsdbClearTableCfg(pTableCfg);
rpcFreeCont(msg);
tdDestroyTSchemaBuilder(&schemaBuilder);
tsdbUpdateTableSchema(pRepo, pTable, pNSchema, true);
} else {
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
tsdbDebug(
"vgId:%d table %s tid %d uid %" PRIu64 " schema version %d is out of date, client version %d, reconfigure...",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), TABLE_TID(pTable), TABLE_UID(pTable), sversion, pBlock->sversion);
terrno = TSDB_CODE_TDB_TABLE_RECONFIGURE;
return -1;
}
} else {
if (tsdbGetTableSchemaByVersion(pTable, pBlock->sversion) == NULL) {
ASSERT(pBlock->sversion >= 0);
if (tsdbGetTableSchemaImpl(pTable, false, false, pBlock->sversion) == NULL) {
tsdbError("vgId:%d invalid submit schema version %d to table %s tid %d from client", REPO_ID(pRepo),
pBlock->sversion, TABLE_CHAR_NAME(pTable), TABLE_TID(pTable));
}
......@@ -1013,7 +1018,64 @@ static int tsdbCheckTableSchema(STsdbRepo *pRepo, SSubmitBlk *pBlock, STable *pT
}
return 0;
}
}
static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
ASSERT(pMsg != NULL);
STsdbMeta * pMeta = pRepo->tsdbMeta;
SSubmitMsgIter msgIter = {0};
SSubmitBlk * pBlock = NULL;
terrno = TSDB_CODE_SUCCESS;
pMsg->length = htonl(pMsg->length);
pMsg->numOfBlocks = htonl(pMsg->numOfBlocks);
if (tsdbInitSubmitMsgIter(pMsg, &msgIter) < 0) return -1;
while (true) {
if (tsdbGetSubmitMsgNext(&msgIter, &pBlock) < 0) return -1;
if (pBlock == NULL) break;
pBlock->uid = htobe64(pBlock->uid);
pBlock->tid = htonl(pBlock->tid);
pBlock->sversion = htonl(pBlock->sversion);
pBlock->dataLen = htonl(pBlock->dataLen);
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
if (pBlock->tid <= 0 || pBlock->tid >= pRepo->config.maxTables) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
STable *pTable = pMeta->tables[pBlock->tid];
if (pTable == NULL || TABLE_UID(pTable) != pBlock->uid) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
return -1;
}
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
tsdbError("vgId:%d invalid action trying to insert a super table %s", REPO_ID(pRepo), TABLE_CHAR_NAME(pTable));
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
// Check schema version and update schema if needed
if (tsdbCheckTableSchema(pRepo, pBlock, pTable) < 0) {
if (terrno == TSDB_CODE_TDB_TABLE_RECONFIGURE) {
continue;
} else {
return -1;
}
}
}
if (terrno != TSDB_CODE_SUCCESS) return -1;
return 0;
}
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
// TODO
......
......@@ -538,10 +538,12 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
SCommitIter *pIter = iters + tid;
if (pIter->pTable == NULL) continue;
taosRLockLatch(&(pIter->pTable->latch));
tsdbSetHelperTable(pHelper, pIter->pTable, pRepo);
if (pIter->pIter != NULL) {
tdInitDataCols(pDataCols, tsdbGetTableSchema(pIter->pTable));
tdInitDataCols(pDataCols, tsdbGetTableSchemaImpl(pIter->pTable, false, false, -1));
int maxRowsToRead = pCfg->maxRowsPerFileBlock * 4 / 5;
int nLoop = 0;
......@@ -557,6 +559,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
int rowsWritten = tsdbWriteDataBlock(pHelper, pDataCols);
ASSERT(rowsWritten != 0);
if (rowsWritten < 0) {
taosRUnLockLatch(&(pIter->pTable->latch));
tsdbError("vgId:%d failed to write data block to table %s tid %d uid %" PRIu64 " since %s", REPO_ID(pRepo),
TABLE_CHAR_NAME(pIter->pTable), TABLE_TID(pIter->pTable), TABLE_UID(pIter->pTable),
tstrerror(terrno));
......@@ -571,6 +574,8 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
ASSERT(pDataCols->numOfRows == 0);
}
taosRUnLockLatch(&(pIter->pTable->latch));
// Move the last block to the new .l file if necessary
if (tsdbMoveLastBlockIfNeccessary(pHelper) < 0) {
tsdbError("vgId:%d, failed to move last block, since %s", REPO_ID(pRepo), tstrerror(terrno));
......@@ -680,10 +685,10 @@ static int tsdbReadRowsFromCache(STsdbMeta *pMeta, STable *pTable, SSkipListIter
if (dataRowKey(row) > maxKey) break;
if (pSchema == NULL || schemaVersion(pSchema) != dataRowVersion(row)) {
pSchema = tsdbGetTableSchemaByVersion(pTable, dataRowVersion(row));
pSchema = tsdbGetTableSchemaImpl(pTable, true, false, dataRowVersion(row));
if (pSchema == NULL) {
// TODO: deal with the error here
ASSERT(false);
ASSERT(0);
}
}
......
This diff has been collapsed.
......@@ -19,6 +19,7 @@
#include "tcoding.h"
#include "tscompression.h"
#include "tsdbMain.h"
#include "tfile.h"
#define TSDB_GET_COMPCOL_LEN(nCols) (sizeof(SCompData) + sizeof(SCompCol) * (nCols) + sizeof(TSCKSUM))
......@@ -217,7 +218,7 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
pHelper->tableInfo.tid = pTable->tableId.tid;
pHelper->tableInfo.uid = pTable->tableId.uid;
STSchema *pSchema = tsdbGetTableSchema(pTable);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
pHelper->tableInfo.sversion = schemaVersion(pSchema);
tdInitDataCols(pHelper->pDataCols[0], pSchema);
......
......@@ -65,7 +65,7 @@ typedef struct {
int64_t totalSize; // total allocated buffer in this hash table, SCacheObj is not included.
int64_t refreshTime;
STrashElem * pTrash;
const char * cacheName;
char* name;
// void * tmrCtrl;
// void * pTimer;
SCacheStatis statistics;
......@@ -163,8 +163,9 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove);
/**
* move all data nodes into the trash can, and clear a node from the trash can if it is not referenced by any client
* @param handle
* @param _remove remove the data or not if refcount is greater than 0
*/
void taosCacheEmpty(SCacheObj *pCacheObj);
void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove);
/**
* release all allocated memory and destroy the cache object.
......
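taosCacheEmpty now takes a second argument, so the caller decides whether still-referenced nodes are freed or parked in the trash can. A minimal sketch (not part of this commit; pCacheObj is assumed to come from taosCacheInit):

    // keep referenced nodes alive: they are moved to the trash can and freed once released
    taosCacheEmpty(pCacheObj, false);
    // force removal: every node is released immediately, even with refcount > 0
    taosCacheEmpty(pCacheObj, true);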
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TFILE_H
#define TDENGINE_TFILE_H
#ifdef TAOS_RANDOM_FILE_FAIL
ssize_t taos_tread(int fd, void *buf, size_t count);
ssize_t taos_twrite(int fd, void *buf, size_t count);
off_t taos_lseek(int fd, off_t offset, int whence);
#define tread(fd, buf, count) taos_tread(fd, buf, count)
#define twrite(fd, buf, count) taos_twrite(fd, buf, count)
#define lseek(fd, offset, whence) taos_lseek(fd, offset, whence)
#endif // TAOS_RANDOM_FILE_FAIL
#endif // TDENGINE_TFILE_H
......@@ -26,6 +26,7 @@ extern "C" {
#define DEBUG_INFO DEBUG_WARN
#define DEBUG_DEBUG 4U
#define DEBUG_TRACE 8U
#define DEBUG_DUMP 16U
#define DEBUG_SCREEN 64U
#define DEBUG_FILE 128U
......
......@@ -119,9 +119,8 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
uDebug("key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes, cacheName:%s",
pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size,
pCacheObj->cacheName);
uDebug("cache:%s, key:%p, %p is destroyed from cache, totalNum:%d totalSize:%" PRId64 "bytes size:%dbytes",
pCacheObj->name, pNode->key, pNode->data, (int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, size);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode);
}
......@@ -226,7 +225,7 @@ static void doCleanupDataCache(SCacheObj *pCacheObj);
*/
static void* taosCacheRefresh(void *handle);
SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
if (refreshTimeInSeconds <= 0) {
return NULL;
}
......@@ -238,7 +237,7 @@ SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bo
}
pCacheObj->pHashTable = taosHashInit(128, taosGetDefaultHashFunction(keyType), false);
pCacheObj->cacheName = cacheName;
pCacheObj->name = strdup(cacheName);
if (pCacheObj->pHashTable == NULL) {
free(pCacheObj);
uError("failed to allocate memory, reason:%s", strerror(errno));
......@@ -268,10 +267,6 @@ SCacheObj *taosCacheInitWithCb(int32_t keyType, int64_t refreshTimeInSeconds, bo
return pCacheObj;
}
SCacheObj *taosCacheInit(int32_t keyType, int64_t refreshTimeInSeconds, bool extendLifespan, __cache_freeres_fn_t fn, const char* cacheName) {
return taosCacheInitWithCb(keyType, refreshTimeInSeconds, extendLifespan, fn, cacheName);
}
void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const void *pData, size_t dataSize, int duration) {
SCacheDataNode *pNode;
......@@ -288,16 +283,16 @@ void *taosCachePut(SCacheObj *pCacheObj, const void *key, size_t keyLen, const v
if (NULL != pNode) {
pCacheObj->totalSize += pNode->size;
uDebug("key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64
"bytes size:%" PRId64 "bytes, cacheName:%s",
key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime),
(int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize, pCacheObj->cacheName);
uDebug("cache:%s, key:%p, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", totalNum:%d totalSize:%" PRId64
"bytes size:%" PRId64 "bytes",
pCacheObj->name, key, pNode->data, pNode->addedTime, (pNode->lifespan * pNode->extendFactor + pNode->addedTime),
(int32_t)taosHashGetSize(pCacheObj->pHashTable), pCacheObj->totalSize, dataSize);
} else {
uError("key:%p, failed to added into cache, out of memory, cacheName:%s", key, pCacheObj->cacheName);
uError("cache:%s, key:%p, failed to add into cache, out of memory", pCacheObj->name, key);
}
} else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
uDebug("key:%p, %p exist in cache, updated, cacheName:%s", key, pNode->data, pCacheObj->cacheName);
uDebug("cache:%s, key:%p, %p exist in cache, updated", pCacheObj->name, key, pNode->data);
}
__cache_unlock(pCacheObj);
......@@ -332,10 +327,10 @@ void *taosCacheAcquireByKey(SCacheObj *pCacheObj, const void *key, size_t keyLen
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%p, %p is retrieved from cache, refcnt:%d, cacheName:%s", key, (*ptNode)->data, ref, pCacheObj->cacheName);
uDebug("cache:%s, key:%p, %p is retrieved from cache, refcnt:%d", pCacheObj->name, key, (*ptNode)->data, ref);
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%p, not in cache, retrieved failed, cacheName:%s", key, pCacheObj->cacheName);
uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
......@@ -360,11 +355,11 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, void *key, size_t ke
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%p, %p expireTime is updated in cache, refcnt:%d, cacheName:%s", key, (*ptNode)->data,
T_REF_VAL_GET(*ptNode), pCacheObj->cacheName);
uDebug("cache:%s, key:%p, %p expireTime is updated in cache, refcnt:%d", pCacheObj->name, key,
(*ptNode)->data, T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%p, not in cache, retrieved failed, cacheName:%s", key, pCacheObj->cacheName);
uDebug("cache:%s, key:%p, not in cache, retrieved failed", pCacheObj->name, key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
......@@ -383,7 +378,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
}
int32_t ref = T_REF_INC(ptNode);
uDebug("%p acquired by data in cache, refcnt:%d, cacheName:%s", ptNode->data, ref, pCacheObj->cacheName);
uDebug("cache:%s, data: %p acquired by data in cache, refcnt:%d", pCacheObj->name, ptNode->data, ref);
// if the remained life span is less then the (*ptNode)->lifeSpan, add up one lifespan
if (pCacheObj->extendLifespan) {
......@@ -391,7 +386,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) {
if ((now - ptNode->addedTime) < ptNode->lifespan * ptNode->extendFactor) {
ptNode->extendFactor += 1;
uDebug("%p extend life time to %" PRId64, ptNode->data,
uDebug("cache:%s, %p extend life time to %" PRId64, pCacheObj->name, ptNode->data,
ptNode->lifespan * ptNode->extendFactor + ptNode->addedTime);
}
}
......@@ -437,7 +432,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
*data = NULL;
int16_t ref = T_REF_DEC(pNode);
uDebug("key:%p, %p is released, refcnt:%d, cacheName:%s", pNode->key, pNode->data, ref, pCacheObj->cacheName);
uDebug("cache:%s, key:%p, %p is released, refcnt:%d", pCacheObj->name, pNode->key, pNode->data, ref);
if (_remove && (!pNode->inTrashCan)) {
__cache_wr_lock(pCacheObj);
......@@ -455,7 +450,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
}
}
void taosCacheEmpty(SCacheObj *pCacheObj) {
void taosCacheEmpty(SCacheObj *pCacheObj, bool _remove) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
__cache_wr_lock(pCacheObj);
......@@ -465,12 +460,16 @@ void taosCacheEmpty(SCacheObj *pCacheObj) {
}
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
taosCacheMoveToTrash(pCacheObj, pNode);
if (T_REF_VAL_GET(pNode) == 0 || _remove) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
taosCacheMoveToTrash(pCacheObj, pNode);
}
}
__cache_unlock(pCacheObj);
taosHashDestroyIter(pIter);
taosTrashCanEmpty(pCacheObj, false);
taosTrashCanEmpty(pCacheObj, _remove);
}
void taosCacheCleanup(SCacheObj *pCacheObj) {
......@@ -481,7 +480,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
pCacheObj->deleting = 1;
pthread_join(pCacheObj->refreshWorker, NULL);
uInfo("cacheName:%p, will be cleanuped", pCacheObj->cacheName);
uInfo("cache:%s will be cleaned up", pCacheObj->name);
doCleanupDataCache(pCacheObj);
}
......@@ -601,22 +600,25 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
SHashMutableIterator *pIter = taosHashCreateIter(pCacheObj->pHashTable);
while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
// if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
if (T_REF_VAL_GET(pNode) <= 0) {
int32_t c = T_REF_VAL_GET(pNode);
if (c <= 0) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
uDebug("key:%p, %p will not remove from cache, refcnt:%d, cacheName:%s", pNode->key, pNode->data,
T_REF_VAL_GET(pNode), pCacheObj->cacheName);
uDebug("cache:%s key:%p, %p will not remove from cache, refcnt:%d", pCacheObj->name, pNode->key,
pNode->data, T_REF_VAL_GET(pNode));
}
}
taosHashDestroyIter(pIter);
taosHashCleanup(pCacheObj->pHashTable);
// todo memory leak if there are objects with refcount greater than 0 in hash table?
taosHashCleanup(pCacheObj->pHashTable);
__cache_unlock(pCacheObj);
taosTrashCanEmpty(pCacheObj, true);
__cache_lock_destroy(pCacheObj);
tfree(pCacheObj->name);
memset(pCacheObj, 0, sizeof(SCacheObj));
free(pCacheObj);
}
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stdio.h>
#include <stdlib.h>
#include <error.h>
#include <errno.h>
#include <stdarg.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "os.h"
#define RANDOM_FILE_FAIL_FACTOR 5
ssize_t taos_tread(int fd, void *buf, size_t count)
{
#ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
errno = EIO;
return -1;
}
#endif
return tread(fd, buf, count);
}
ssize_t taos_twrite(int fd, void *buf, size_t count)
{
#ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
errno = EIO;
return -1;
}
#endif
return twrite(fd, buf, count);
}
off_t taos_lseek(int fd, off_t offset, int whence)
{
#ifdef TAOS_RANDOM_FILE_FAIL
if (rand() % RANDOM_FILE_FAIL_FACTOR == 0) {
errno = EIO;
return -1;
}
#endif
return lseek(fd, offset, whence);
}
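Any file that includes tfile.h and is built with -DTAOS_RANDOM_FILE_FAIL picks up these wrappers transparently, since the header remaps tread/twrite/lseek to the taos_* versions. A brief illustrative sketch (not part of this commit; writeBlock is a hypothetical helper, and the plain tread/twrite macros from the os layer are assumed to exist when the flag is off):

    #include "tfile.h"  // with TAOS_RANDOM_FILE_FAIL defined, twrite below becomes taos_twrite

    static int writeBlock(int fd, void *buf, size_t len) {
      if (twrite(fd, buf, len) != (ssize_t)len) {
        // under fault injection, roughly 1 call in RANDOM_FILE_FAIL_FACTOR (5) fails with errno == EIO
        return -1;
      }
      return 0;
    }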
......@@ -27,6 +27,7 @@
#include "tcoding.h"
#include "tkvstore.h"
#include "tulog.h"
#include "tfile.h"
#define TD_KVSTORE_HEADER_SIZE 512
#define TD_KVSTORE_MAJOR_VERSION 1
......@@ -581,4 +582,4 @@ _err:
taosHashDestroyIter(pIter);
tfree(buf);
return -1;
}
\ No newline at end of file
}
......@@ -53,7 +53,7 @@ typedef struct {
STsdbCfg tsdbCfg;
SSyncCfg syncCfg;
SWalCfg walCfg;
void *qHandlePool; // query handle pool
void *qMgmt;
char *rootDir;
char db[TSDB_DB_NAME_LEN];
} SVnodeObj;
......
......@@ -34,8 +34,7 @@
#define TSDB_VNODE_VERSION_CONTENT_LEN 31
static int32_t tsOpennedVnodes;
static void *tsDnodeVnodesHash;
static SHashObj*tsDnodeVnodesHash;
static void vnodeCleanUp(SVnodeObj *pVnode);
static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg);
static int32_t vnodeReadCfg(SVnodeObj *pVnode);
......@@ -46,9 +45,6 @@ static uint32_t vnodeGetFileInfo(void *ahandle, char *name, uint32_t *index, uin
static int vnodeGetWalInfo(void *ahandle, char *name, uint32_t *index);
static void vnodeNotifyRole(void *ahandle, int8_t role);
static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion);
static void vnodeFreeqHandle(void* phandle);
static pthread_once_t vnodeModuleInit = PTHREAD_ONCE_INIT;
#ifndef _SYNC
tsync_h syncStart(const SSyncInfo *info) { return NULL; }
......@@ -59,19 +55,28 @@ int syncGetNodesRole(tsync_h shandle, SNodesRole * cfg) { return 0; }
void syncConfirmForward(tsync_h shandle, uint64_t version, int32_t code) {}
#endif
static void vnodeInit() {
int32_t vnodeInitResources() {
vnodeInitWriteFp();
vnodeInitReadFp();
tsDnodeVnodesHash = taosHashInit(TSDB_MAX_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true);
if (tsDnodeVnodesHash == NULL) {
vError("failed to init vnode list");
return TSDB_CODE_VND_OUT_OF_MEMORY;
}
return TSDB_CODE_SUCCESS;
}
void vnodeCleanupResources() {
if (tsDnodeVnodesHash != NULL) {
taosHashCleanup(tsDnodeVnodesHash);
tsDnodeVnodesHash = NULL;
}
}
int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
int32_t code;
pthread_once(&vnodeModuleInit, vnodeInit);
SVnodeObj *pTemp = (SVnodeObj *)taosHashGet(tsDnodeVnodesHash, (const char *)&pVnodeCfg->cfg.vgId, sizeof(int32_t));
if (pTemp != NULL) {
......@@ -139,11 +144,6 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
}
int32_t vnodeDrop(int32_t vgId) {
if (tsDnodeVnodesHash == NULL) {
vDebug("vgId:%d, failed to drop, vgId not exist", vgId);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
}
SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t));
if (ppVnode == NULL || *ppVnode == NULL) {
vDebug("vgId:%d, failed to drop, vgId not find", vgId);
......@@ -160,6 +160,13 @@ int32_t vnodeDrop(int32_t vgId) {
int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
SVnodeObj *pVnode = param;
if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_VND_INVALID_STATUS;
if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED)
return TSDB_CODE_VND_NOT_SYNCED;
pVnode->status = TAOS_VN_STATUS_UPDATING;
int32_t code = vnodeSaveCfg(pVnodeCfg);
......@@ -182,7 +189,6 @@ int32_t vnodeAlter(void *param, SMDCreateVnodeMsg *pVnodeCfg) {
int32_t vnodeOpen(int32_t vnode, char *rootDir) {
char temp[TSDB_FILENAME_LEN];
pthread_once(&vnodeModuleInit, vnodeInit);
SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1);
if (pVnode == NULL) {
......@@ -190,7 +196,6 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
return TAOS_SYSTEM_ERROR(errno);
}
atomic_add_fetch_32(&tsOpennedVnodes, 1);
atomic_add_fetch_32(&pVnode->refCount, 1);
pVnode->vgId = vnode;
......@@ -283,9 +288,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
cqStart(pVnode->cq);
const int32_t REFRESH_HANDLE_INTERVAL = 2; // every 2 seconds, rfresh handle pool
pVnode->qHandlePool = taosCacheInit(TSDB_DATA_TYPE_BIGINT, REFRESH_HANDLE_INTERVAL, true, vnodeFreeqHandle, "qhandle");
pVnode->qMgmt = qOpenQueryMgmt(pVnode->vgId);
pVnode->events = NULL;
pVnode->status = TAOS_VN_STATUS_READY;
vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode);
......@@ -296,7 +299,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
}
int32_t vnodeStartStream(int32_t vnode) {
SVnodeObj* pVnode = vnodeAccquireVnode(vnode);
SVnodeObj* pVnode = vnodeAcquireVnode(vnode);
if (pVnode != NULL) {
tsdbStartStream(pVnode->tsdb);
vnodeRelease(pVnode);
......@@ -328,6 +331,9 @@ void vnodeRelease(void *pVnodeRaw) {
return;
}
qCleanupQueryMgmt(pVnode->qMgmt);
pVnode->qMgmt = NULL;
if (pVnode->tsdb)
tsdbCloseRepo(pVnode->tsdb, 1);
pVnode->tsdb = NULL;
......@@ -360,19 +366,11 @@ void vnodeRelease(void *pVnodeRaw) {
free(pVnode);
int32_t count = atomic_sub_fetch_32(&tsOpennedVnodes, 1);
int32_t count = taosHashGetSize(tsDnodeVnodesHash);
vDebug("vgId:%d, vnode is released, vnodes:%d", vgId, count);
if (count <= 0) {
taosHashCleanup(tsDnodeVnodesHash);
vnodeModuleInit = PTHREAD_ONCE_INIT;
tsDnodeVnodesHash = NULL;
}
}
void *vnodeGetVnode(int32_t vgId) {
if (tsDnodeVnodesHash == NULL) return NULL;
SVnodeObj **ppVnode = (SVnodeObj **)taosHashGet(tsDnodeVnodesHash, (const char *)&vgId, sizeof(int32_t));
if (ppVnode == NULL || *ppVnode == NULL) {
terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
......@@ -383,7 +381,7 @@ void *vnodeGetVnode(int32_t vgId) {
return *ppVnode;
}
void *vnodeAccquireVnode(int32_t vgId) {
void *vnodeAcquireVnode(int32_t vgId) {
SVnodeObj *pVnode = vnodeGetVnode(vgId);
if (pVnode == NULL) return pVnode;
......@@ -393,12 +391,21 @@ void *vnodeAccquireVnode(int32_t vgId) {
return pVnode;
}
void *vnodeAcquireRqueue(void *param) {
SVnodeObj *pVnode = param;
if (pVnode == NULL) return NULL;
atomic_add_fetch_32(&pVnode->refCount, 1);
vDebug("vgId:%d, get vnode rqueue, refCount:%d", pVnode->vgId, pVnode->refCount);
return ((SVnodeObj *)pVnode)->rqueue;
}
void *vnodeGetRqueue(void *pVnode) {
return ((SVnodeObj *)pVnode)->rqueue;
}
void *vnodeGetWqueue(int32_t vgId) {
SVnodeObj *pVnode = vnodeAccquireVnode(vgId);
SVnodeObj *pVnode = vnodeAcquireVnode(vgId);
if (pVnode == NULL) return NULL;
return pVnode->wqueue;
}
......@@ -408,8 +415,11 @@ void *vnodeGetWal(void *pVnode) {
}
static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
if (pVnode->status == TAOS_VN_STATUS_DELETING) return;
if (pVnode->status != TAOS_VN_STATUS_READY) return;
if (pStatus->openVnodes >= TSDB_MAX_VNODES) return;
if (pVnode->syncCfg.replica > 1 && pVnode->role == TAOS_SYNC_ROLE_UNSYNCED) return;
if (pVnode->tsdb == NULL) return;
int64_t totalStorage, compStorage, pointsWritten = 0;
tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage);
......@@ -424,6 +434,26 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SDMStatusMsg *pStatus) {
pLoad->replica = pVnode->syncCfg.replica;
}
int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
while (taosHashIterNext(pIter)) {
SVnodeObj **pVnode = taosHashIterGet(pIter);
if (pVnode == NULL) continue;
if (*pVnode == NULL) continue;
(*numOfVnodes)++;
if (*numOfVnodes >= TSDB_MAX_VNODES) {
vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES);
continue;
} else {
vnodeList[*numOfVnodes - 1] = (*pVnode)->vgId;
}
}
taosHashDestroyIter(pIter);
return TSDB_CODE_SUCCESS;
}
void vnodeBuildStatusMsg(void *param) {
SDMStatusMsg *pStatus = param;
SHashMutableIterator *pIter = taosHashCreateIter(tsDnodeVnodesHash);
......@@ -442,7 +472,7 @@ void vnodeBuildStatusMsg(void *param) {
void vnodeSetAccess(SDMVgroupAccess *pAccess, int32_t numOfVnodes) {
for (int32_t i = 0; i < numOfVnodes; ++i) {
pAccess[i].vgId = htonl(pAccess[i].vgId);
SVnodeObj *pVnode = vnodeAccquireVnode(pAccess[i].vgId);
SVnodeObj *pVnode = vnodeAcquireVnode(pAccess[i].vgId);
if (pVnode != NULL) {
pVnode->accessState = pAccess[i].accessState;
if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) {
......@@ -466,7 +496,7 @@ static void vnodeCleanUp(SVnodeObj *pVnode) {
vTrace("vgId:%d, vnode will cleanup, refCount:%d", pVnode->vgId, pVnode->refCount);
// release local resources only after cutting off outside connections
taosCacheCleanup(pVnode->qHandlePool);
qSetQueryMgmtClosed(pVnode->qMgmt);
vnodeRelease(pVnode);
}
......@@ -872,12 +902,3 @@ PARSE_OVER:
if(fp) fclose(fp);
return terrno;
}
void vnodeFreeqHandle(void *qHandle) {
void** handle = qHandle;
if (handle == NULL || *handle == NULL) {
return;
}
qKillQuery(*handle);
}
\ No newline at end of file
......@@ -14,6 +14,7 @@
*/
#define _DEFAULT_SOURCE
#include <dnode.h>
#include "os.h"
#include "tglobal.h"
......@@ -45,9 +46,9 @@ int32_t vnodeProcessRead(void *param, SReadMsg *pReadMsg) {
return TSDB_CODE_VND_MSG_NOT_PROCESSED;
}
if (pVnode->status == TAOS_VN_STATUS_DELETING || pVnode->status == TAOS_VN_STATUS_CLOSING) {
if (pVnode->status != TAOS_VN_STATUS_READY) {
vDebug("vgId:%d, msgType:%s not processed, vnode status is %d", pVnode->vgId, taosMsg[msgType], pVnode->status);
return TSDB_CODE_VND_INVALID_VGROUP_ID;
return TSDB_CODE_VND_INVALID_STATUS;
}
// TODO: Later, let slave to support query
......@@ -73,18 +74,22 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
killQueryMsg->free = htons(killQueryMsg->free);
killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle);
vWarn("QInfo:%p connection %p broken, kill query", (void*)killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
void* handle = NULL;
if ((void**) killQueryMsg->qhandle != NULL) {
handle = *(void**) killQueryMsg->qhandle;
}
vWarn("QInfo:%p connection %p broken, kill query", handle, pReadMsg->rpcMsg.handle);
assert(pReadMsg->rpcMsg.contLen > 0 && killQueryMsg->free == 1);
// this message arrived here by means of the *query* message, so release the vnode is necessary
void** qhandle = taosCacheAcquireByKey(pVnode->qHandlePool, (void*) &killQueryMsg->qhandle, sizeof(killQueryMsg->qhandle));
void** qhandle = qAcquireQInfo(pVnode->qMgmt, (void**) killQueryMsg->qhandle);
if (qhandle == NULL || *qhandle == NULL) {
vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void*) killQueryMsg->qhandle, pReadMsg->rpcMsg.handle);
} else {
taosCacheRelease(pVnode->qHandlePool, (void**) &qhandle, true);
assert(qhandle == (void**) killQueryMsg->qhandle);
qReleaseQInfo(pVnode->qMgmt, (void**) &qhandle, true);
}
vnodeRelease(pVnode);
return TSDB_CODE_TSC_QUERY_CANCELLED;
}
......@@ -93,7 +98,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
void** handle = NULL;
if (contLen != 0) {
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, vnodeRelease, &pQInfo);
code = qCreateQueryInfo(pVnode->tsdb, pVnode->vgId, pQueryTableMsg, pVnode, NULL, &pQInfo);
SQueryTableRsp *pRsp = (SQueryTableRsp *) rpcMallocCont(sizeof(SQueryTableRsp));
pRsp->code = code;
......@@ -105,25 +110,30 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
// current connect is broken
if (code == TSDB_CODE_SUCCESS) {
if (vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, pQInfo, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:%p, dnode query discarded since link is broken, %p", pVnode->vgId, pQInfo,
pReadMsg->rpcMsg.handle);
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
handle = qRegisterQInfo(pVnode->qMgmt, pQInfo);
if (handle == NULL) { // failed to register qhandle
pRsp->code = TSDB_CODE_QRY_INVALID_QHANDLE;
// NOTE: there are two refcounts, needs to kill twice, todo refactor
// query has not been put into qhandle pool, kill it directly.
qKillQuery(pQInfo);
qKillQuery(pQInfo);
pQInfo = NULL;
} else {
assert(*handle == pQInfo);
pRsp->qhandle = htobe64((uint64_t) (handle));
}
if (handle != NULL && vnodeNotifyCurrentQhandle(pReadMsg->rpcMsg.handle, handle, pVnode->vgId) != TSDB_CODE_SUCCESS) {
vError("vgId:%d, QInfo:%p, query discarded since link is broken, %p", pVnode->vgId, pQInfo, pReadMsg->rpcMsg.handle);
pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
// NOTE: there are two refcounts, needs to kill twice
// query has not been put into qhandle pool, kill it directly.
qKillQuery(pQInfo);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
return pRsp->code;
}
handle = taosCachePut(pVnode->qHandlePool, pQInfo, sizeof(pQInfo), &pQInfo, sizeof(pQInfo), tsShellActivityTimer * 2);
assert(*handle == pQInfo);
pRsp->qhandle = htobe64((uint64_t) (handle));
} else {
assert(pQInfo == NULL);
vnodeRelease(pVnode);
}
vDebug("vgId:%d, QInfo:%p, dnode query msg disposed", vgId, pQInfo);
......@@ -138,9 +148,8 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (pQInfo != NULL) {
qTableQuery(pQInfo); // do execute query
assert(handle != NULL);
taosCacheRelease(pVnode->qHandlePool, (void**) &handle, false);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, false);
}
return code;
......@@ -159,7 +168,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
memset(pRet, 0, sizeof(SRspRet));
int32_t ret = 0;
void** handle = taosCacheAcquireByKey(pVnode->qHandlePool, pQInfo, sizeof(pQInfo));
void** handle = qAcquireQInfo(pVnode->qMgmt, pQInfo);
if (handle == NULL || handle != pQInfo) {
ret = TSDB_CODE_QRY_INVALID_QHANDLE;
}
......@@ -167,8 +176,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
if (pRetrieve->free == 1) {
if (ret == TSDB_CODE_SUCCESS) {
vDebug("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pQInfo);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
taosCacheRelease(pVnode->qHandlePool, (void**) &handle, true);
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
pRet->len = sizeof(SRetrieveTableRsp);
......@@ -178,30 +187,30 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SReadMsg *pReadMsg) {
pRsp->completed = true;
pRsp->useconds = 0;
} else { // todo handle error
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
}
return ret;
}
vDebug("vgId:%d, QInfo:%p, retrieve msg is received", pVnode->vgId, *pQInfo);
int32_t code = qRetrieveQueryResultInfo(*pQInfo);
if (code != TSDB_CODE_SUCCESS) {
if (code != TSDB_CODE_SUCCESS || ret != TSDB_CODE_SUCCESS) {
//TODO
pRet->rsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp));
memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp));
} else {
// todo check code and handle error in build result set
code = qDumpRetrieveResult(*pQInfo, (SRetrieveTableRsp **)&pRet->rsp, &pRet->len);
if (qHasMoreResultsToRetrieve(*pQInfo)) {
if (qHasMoreResultsToRetrieve(*handle)) {
dnodePutItemIntoReadQueue(pVnode, handle);
pRet->qhandle = handle;
code = TSDB_CODE_VND_ACTION_NEED_REPROCESSED;
code = TSDB_CODE_SUCCESS;
} else { // no further execution invoked, release the ref to vnode
taosCacheRelease(pVnode->qHandlePool, (void**) &handle, true);
qReleaseQInfo(pVnode->qMgmt, (void**) &handle, true);
}
}
return code;
}
......@@ -216,4 +225,4 @@ int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId) {
vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle);
return rpcReportProgress(handle, (char*) killQueryMsg, sizeof(SRetrieveTableMsg));
}
\ No newline at end of file
}
......@@ -58,7 +58,7 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
if (pHead->version == 0) { // from client or CQ
if (pVnode->status != TAOS_VN_STATUS_READY)
return TSDB_CODE_VND_INVALID_VGROUP_ID; // it may be in deleting or closing state
return TSDB_CODE_VND_INVALID_STATUS; // it may be in deleting or closing state
if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER)
return TSDB_CODE_RPC_NOT_READY;
......@@ -89,21 +89,25 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {
return syncCode;
}
void vnodeConfirmForward(void *param, uint64_t version, int32_t code) {
SVnodeObj *pVnode = (SVnodeObj *)param;
syncConfirmForward(pVnode->sync, version, code);
}
static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
int32_t code = TSDB_CODE_SUCCESS;
vTrace("vgId:%d, submit msg is processed", pVnode->vgId);
// save insert result into item
SShellSubmitRspMsg *pRsp = NULL;
if (pRet) {
pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);
pRsp = pRet->rsp;
}
vTrace("vgId:%d, submit msg is processed", pVnode->vgId);
pRet->len = sizeof(SShellSubmitRspMsg);
pRet->rsp = rpcMallocCont(pRet->len);
SShellSubmitRspMsg *pRsp = pRet->rsp;
if (tsdbInsertData(pVnode->tsdb, pCont, pRsp) < 0) code = terrno;
pRsp->numOfFailedBlocks = 0; //TODO
//pRet->len += pRsp->numOfFailedBlocks * sizeof(SShellSubmitRspBlock); //TODO
pRsp->code = 0;
pRsp->numOfRows = htonl(1);
return code;
}
......@@ -158,7 +162,7 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet
}
static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) {
if (tsdbUpdateTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) {
if (tsdbUpdateTableTagValue(pVnode->tsdb, (SUpdateTableTagValMsg *)pCont) < 0) {
return terrno;
}
return TSDB_CODE_SUCCESS;
......
......@@ -94,7 +94,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>18.0</version>
<version>24.1.1</version>
</dependency>
<dependency>
......
......@@ -52,8 +52,7 @@ class TDTestCase:
# illegal condition
tdSql.error(
"select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2")
"select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
def stop(self):
tdSql.close()
......
......@@ -36,18 +36,17 @@ class TDTestCase:
"insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
# inner join --- bug
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
tdSql.checkRows(1)
tdSql.error("select * from tb1 a, tb2 b where a.ts = b.ts")
# join 3 tables -- bug exists
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
# query show stable
tdSql.query("show stables")
tdSql.checkRows(1)
# query show tables
tdSql.query("show table")
tdSql.query("show tables")
tdSql.checkRows(2)
# query count
......@@ -71,16 +70,13 @@ class TDTestCase:
tdSql.checkRows(2)
# query first ... as
tdSql.query("select first(*) as begin from stb1")
tdSql.checkData(0, 1, 1)
tdSql.error("select first(*) as begin from stb1")
# query last ... as
tdSql.query("select last(*) as end from stb1")
tdSql.checkData(0, 1, 4)
tdSql.error("select last(*) as end from stb1")
# query last_row ... as
tdSql.query("select last_row(*) as end from stb1")
tdSql.checkData(0, 1, 4)
tdSql.error("select last_row(*) as end from stb1")
# query group .. by
tdSql.query("select sum(c1), t2 from stb1 group by t2")
......@@ -95,8 +91,7 @@ class TDTestCase:
tdSql.checkRows(1)
# query ... alias for table ---- bug
tdSql.query("select t.ts from tb1 t")
tdSql.checkRows(2)
tdSql.error("select t.ts from tb1 t")
# query ... tbname
tdSql.query("select tbname from stb1")
......@@ -104,7 +99,7 @@ class TDTestCase:
# query ... tbname count ---- bug
tdSql.query("select count(tbname) from stb1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 2)
# query ... select database ---- bug
tdSql.query("SELECT database()")
......
......@@ -40,11 +40,7 @@ class TDTestCase:
tdSql.query("select last(*) from st")
tdSql.checkRows(1)
print(
"======= Verify filter for %s type finished =========" %
curType)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
......
......@@ -365,3 +365,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
......@@ -133,3 +133,7 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim
./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim
./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim
# Test case describe: dnode1/dnode2 include mnode and vnode roles
# step 1: start dnode1/dnode2, and added into cluster
# step 2: create db(repl = 2), table, insert data,
# step 4: stop dnode1, remove its mnode dir, and copy mnode dir of dnode2 to dnode1
# step 5: restart dnode1, waiting sync end
# step 6: stop dnode2, reset query cache, and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create table , insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert old data(now-15d) and new data(now+15d), limit the number of rows so data stays in cache and does not fall to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$loopCnt = 0
wait_dnode1_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode1_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
#sql show vgroups
#print show vgroups:
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print ============== step5: remove the mnode dir of dnode1, then copy the mnode dir of dnode2
system_content rm -rf ../../../sim/dnode1/data/mnode
system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
print ============== step6: restart dnode1, waiting sync end
system sh/exec.sh -n dnode1 -s start
sleep 1000
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print ============== step7: stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
$loopCnt = 0
wait_dnode2_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
\ No newline at end of file
# Test case describe: dnode1/dnode2 include mnode and vnode roles
# step 1: start dnode1/dnode2, and added into cluster
# step 2: create db(repl = 2), table, insert data,
# step 4: stop dnode1, remove its mnode and vnode dir, and copy mnode and vnode dir of dnode2 to dnode1
# step 5: restart dnode1, waiting sync end
# step 6: stop dnode2, reset query cache, and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
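# with replica 2, the tarbitrator configured above acts as the tie-breaking vote so a vgroup can keep a master when one of the two replicas is offline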
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
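# replica 2 keeps one copy of every vgroup on each of the two dnodes, which is what allows dnode1 to be rebuilt from dnode2's data later in this test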
sql use $db
# create tables, insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
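# each pass of the loop inserts one batch of values at 1 ms offsets from $ts; the 25a-34a values appear twice in the statement, but rows with duplicate timestamps are stored only once, so each batch adds 60 rows and $x advances by 60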
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert old data (now-20d and now-40d); keep the number of rows small so the data stays in cache and is not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$loopCnt = 0
wait_dnode1_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode1_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_offline
endi
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
#sql show vgroups
#print show vgroups:
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print ============== step5: remove the vnode and mnode dirs of dnode1, then copy the vnode and mnode dirs of dnode2
system_content rm -rf ../../../sim/dnode1/data/vnode
system_content rm -rf ../../../sim/dnode1/data/mnode
system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
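# both the vnode and mnode dirs of dnode1 are replaced with copies taken from dnode2, so dnode1 restarts from a full snapshot of dnode2's data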
print ============== step6: restart dnode1 and wait for sync to finish
system sh/exec.sh -n dnode1 -s start
sleep 1000
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print ============== step7: stop dnode2
system sh/exec.sh -n dnode2 -s stop -x SIGINT
$loopCnt = 0
wait_dnode2_offline:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show dnodes
if $rows != 2 then
sleep 2000
goto wait_dnode2_offline
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode2_offline
endi
if $dnode2Status != offline then
sleep 2000
goto wait_dnode2_offline
endi
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
\ No newline at end of file
# Test case description: dnode1/dnode2 both carry the mnode and vnode roles
# step 1: start dnode1/dnode2 and add them into the cluster
# step 2: create db (replica = 2), create tables, insert data
# step 3: stop dnode1 and dnode2, remove the mnode and vnode dirs of dnode1, and copy the mnode and vnode dirs of dnode2 to dnode1
# step 4: restart dnode1/dnode2 and wait until both are ready and the vgroup roles settle
# step 5: reset query cache and query
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
#system sh/deploy.sh -n dnode3 -i 3
#system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 2
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 2
#system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1
#system sh/cfg.sh -n dnode4 -c numOfMnodes -v 1
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
#system sh/cfg.sh -n dnode3 -c walLevel -v 2
#system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
#system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
#system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
#system sh/cfg.sh -n dnode3 -c alternativeRole -v 2
#system sh/cfg.sh -n dnode4 -c alternativeRole -v 2
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 4
#system sh/cfg.sh -n dnode5 -c maxtablesPerVnode -v 4
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
#system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2 and add into cluster
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sleep 1000
sql create dnode $hostname2
sleep 1000
print ============== step2: create database with replica 2, and create table, insert data
$totalTableNum = 10
$sleepTimer = 3000
$db = db
sql create database $db replica 2 cache 1
sql use $db
# create tables, insert data
$stb = stb
sql create table $stb (ts timestamp, c1 double) tags(t1 int)
$rowNum = 1200
$tblNum = $totalTableNum
$totalRows = 0
$tsStart = 1577808000000 # 2020-01-01 00:00:00.000
$i = 0
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags( $i )
$x = 0
while $x < $rowNum
$ts = $tsStart + $x
sql insert into $tb values ( $ts + 0a , $x ) ( $ts + 1a , $x ) ( $ts + 2a , $x ) ( $ts + 3a , $x ) ( $ts + 4a , $x ) ( $ts + 5a , $x ) ( $ts + 6a , $x ) ( $ts + 7a , $x ) ( $ts + 8a , $x ) ( $ts + 9a , $x ) ( $ts + 10a , $x ) ( $ts + 11a , $x ) ( $ts + 12a , $x ) ( $ts + 13a , $x ) ( $ts + 14a , $x ) ( $ts + 15a , $x ) ( $ts + 16a , $x ) ( $ts + 17a , $x ) ( $ts + 18a , $x ) ( $ts + 19a , $x ) ( $ts + 20a , $x ) ( $ts + 21a , $x ) ( $ts + 22a , $x ) ( $ts + 23a , $x ) ( $ts + 24a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 25a , $x ) ( $ts + 26a , $x ) ( $ts + 27a , $x ) ( $ts + 28a , $x ) ( $ts + 29a , $x ) ( $ts + 30a , $x ) ( $ts + 31a , $x ) ( $ts + 32a , $x ) ( $ts + 33a , $x ) ( $ts + 34a , $x ) ( $ts + 35a , $x ) ( $ts + 36a , $x ) ( $ts + 37a , $x ) ( $ts + 38a , $x ) ( $ts + 39a , $x ) ( $ts + 40a , $x ) ( $ts + 41a , $x ) ( $ts + 42a , $x ) ( $ts + 43a , $x ) ( $ts + 44a , $x ) ( $ts + 45a , $x ) ( $ts + 46a , $x ) ( $ts + 47a , $x ) ( $ts + 48a , $x ) ( $ts + 49a , $x ) ( $ts + 50a , $x ) ( $ts + 51a , $x ) ( $ts + 52a , $x ) ( $ts + 53a , $x ) ( $ts + 54a , $x ) ( $ts + 55a , $x ) ( $ts + 56a , $x ) ( $ts + 57a , $x ) ( $ts + 58a , $x ) ( $ts + 59a , $x )
$x = $x + 60
endw
$totalRows = $totalRows + $x
print info: inserted $x rows into $tb and totalRows: $totalRows
$i = $i + 1
endw
sql select count(*) from $stb
print rows:$rows data00:$data00 totalRows:$totalRows
if $rows != 1 then
return -1
endi
if $data00 != $totalRows then
return -1
endi
print ============== step3: insert old data (now-20d and now-40d); keep the number of rows small so the data stays in cache and is not flushed to disk
sql insert into $tb values ( now - 20d , -20 )
sql insert into $tb values ( now - 40d , -40 )
$totalRows = $totalRows + 2
print ============== step4: stop dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
print ============== step5: remove the vnode and mnode dirs of dnode1, then copy the vnode and mnode dirs of dnode2
system_content rm -rf ../../../sim/dnode1/data/vnode
system_content rm -rf ../../../sim/dnode1/data/mnode
system_content cp -rf ../../../sim/dnode2/data/vnode ../../../sim/dnode1/data/
system_content cp -rf ../../../sim/dnode2/data/mnode ../../../sim/dnode1/data/
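# unlike the previous case, both dnodes are stopped while the dirs are copied; after both restart, the vgroups are still expected to come up with dnode2 as master and dnode1 as slave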
print ============== step6: restart dnode1/dnode2
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sleep 1000
sql connect
sql use $db
$loopCnt = 0
wait_dnode1_ready:
$loopCnt = $loopCnt + 1
if $loopCnt == 20 then
return -1
endi
sql show dnodes -x wait_dnode1_ready
if $rows != 2 then
sleep 2000
goto wait_dnode1_ready
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
$dnode1Status = $data4_1
$dnode2Status = $data4_2
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
if $dnode2Status != ready then
sleep 2000
goto wait_dnode1_ready
endi
$loopCnt = 0
wait_dnode1_vgroup_slave:
$loopCnt = $loopCnt + 1
if $loopCnt == 10 then
return -1
endi
sql show vgroups
if $rows != 3 then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data4_4
$d2v3status = $data4_2
$d2v4status = $data4_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v3status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d2v4status != master then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v2status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v3status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
if $d1v4status != slave then
sleep 2000
goto wait_dnode1_vgroup_slave
endi
sql reset query cache
# check using select
sql select count(*) from $stb
print data00 $data00
if $data00 != $totalRows then
return -1
endi
\ No newline at end of file