Commit 370fadb0 authored by Haojun Liao

Merge branch 'develop' into feature/query

from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
+import threading

# querySeqNum = 0

@@ -37,6 +38,7 @@ class TDengineCursor(object):
        self._block_iter = 0
        self._affected_rows = 0
        self._logfile = ""
+       self._threadId = threading.get_ident()

        if connection is not None:
            self._connection = connection

@@ -103,6 +105,12 @@ class TDengineCursor(object):
    def execute(self, operation, params=None):
        """Prepare and execute a database operation (query or command).
        """
+       # if threading.get_ident() != self._threadId:
+       #     info = "Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
+       #     raise OperationalError(info)
+       #     print(info)
+       #     return None
        if not operation:
            return None

@@ -188,6 +196,11 @@ class TDengineCursor(object):
    def fetchall(self):
        """Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation.
        """
+       # if threading.get_ident() != self._threadId:
+       #     info = "[WARNING] Cursor fetchall:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
+       #     raise OperationalError(info)
+       #     print(info)
+       #     return None
        if self._result is None or self._fields is None:
            raise OperationalError("Invalid use of fetchall")

@@ -232,6 +245,12 @@ class TDengineCursor(object):
    def _handle_result(self):
        """Handle the return result from query.
        """
+       # if threading.get_ident() != self._threadId:
+       #     info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
+       #     raise OperationalError(info)
+       #     print(info)
+       #     return None
        self._description = []
        for ele in self._fields:
            self._description.append(
......
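The cursor changes above record the creating thread via `threading.get_ident()` and sketch (still commented out) a guard that would reject cross-thread use of a cursor with `OperationalError`. A minimal, hypothetical sketch of how that guard would surface to connector users follows; it assumes an already reachable TDengine server, placeholder credentials, and the guard being enabled, none of which this commit itself provides.

```python
import threading

import taos
from taos.error import OperationalError

# Hypothetical illustration of the (commented-out) thread-affinity guard above.
# Host and credentials are placeholders; adjust for a real deployment.
conn = taos.connect(host="localhost", user="root", password="taosdata")
cursor = conn.cursor()  # the cursor remembers threading.get_ident() of this thread


def worker():
    try:
        # If the guard in execute()/fetchall() were enabled, calling the cursor
        # from a thread other than its creator would raise OperationalError
        # instead of touching the shared result set.
        cursor.execute("select server_status()")
        print(cursor.fetchall())
    except OperationalError as err:
        print("cross-thread cursor use rejected:", err)


t = threading.Thread(target=worker)
t.start()
t.join()
```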
@@ -131,8 +131,8 @@ static void dnodeFreeMnodeWriteMsg(SMnodeMsg *pWrite) {
  taosFreeQitem(pWrite);
}

-void dnodeSendRpcMnodeWriteRsp(void *pRaw, int32_t code) {
-  SMnodeMsg *pWrite = pRaw;
+void dnodeSendRpcMnodeWriteRsp(void *pMsg, int32_t code) {
+  SMnodeMsg *pWrite = pMsg;
  if (pWrite == NULL) return;
  if (code == TSDB_CODE_MND_ACTION_IN_PROGRESS) return;
  if (code == TSDB_CODE_MND_ACTION_NEED_REPROCESSED) {
......
@@ -206,9 +206,10 @@ static void shellSourceFile(TAOS *con, char *fptr) {
    if (code != 0) {
      fprintf(stderr, "DB error: %s: %s (%d)\n", taos_errstr(con), fname, lineNo);
-     /* free local resouce: allocated memory/metric-meta refcnt */
-     taos_free_result(pSql);
    }
+   /* free local resouce: allocated memory/metric-meta refcnt */
+   taos_free_result(pSql);

    memset(cmd, 0, MAX_COMMAND_SIZE);
    cmd_len = 0;
......
@@ -520,9 +520,8 @@ int main(int argc, char *argv[]) {
    snprintf(command, BUFFER_SIZE, "create table if not exists %s.meters (ts timestamp%s tags (areaid int, loc binary(10))", db_name, cols);
    queryDB(taos, command);
    printf("meters created!\n");
+   taos_close(taos);
  }
- taos_close(taos);

  /* Wait for table to create */
  multiThreadCreateTable(cols, use_metric, threads, ntables, db_name, tb_prefix, ip_addr, port, user, pass);

@@ -792,9 +791,6 @@ void * createTable(void *sarg)
      snprintf(command, BUFFER_SIZE, "create table if not exists %s.%s%d (ts timestamp%s;", winfo->db_name, winfo->tb_prefix, i, winfo->cols);
      queryDB(winfo->taos, command);
    }
-   taos_close(winfo->taos);
  } else {
    /* Create all the tables; */
    printf("Creating table from %d to %d\n", winfo->start_table_id, winfo->end_table_id);

@@ -812,7 +808,6 @@ void * createTable(void *sarg)
      }
      queryDB(winfo->taos, command);
    }
-   taos_close(winfo->taos);
  }
  return NULL;
......
@@ -53,6 +53,7 @@ typedef struct {
  void *   rowData;
  int32_t  rowSize;
  int32_t  retCode;  // for callback in sdb queue
+ int32_t  processedCount;  // for sync fwd callback
  int32_t (*cb)(struct SMnodeMsg *pMsg, int32_t code);
  struct SMnodeMsg *pMsg;
} SSdbOper;
......
@@ -88,13 +88,13 @@ static int32_t mnodeDnodeActionDelete(SSdbOper *pOper) {
}

static int32_t mnodeDnodeActionUpdate(SSdbOper *pOper) {
- SDnodeObj *pDnode = pOper->pObj;
- SDnodeObj *pSaved = mnodeGetDnode(pDnode->dnodeId);
- if (pSaved != NULL && pDnode != pSaved) {
-   memcpy(pSaved, pDnode, pOper->rowSize);
-   free(pDnode);
-   mnodeDecDnodeRef(pSaved);
+ SDnodeObj *pNew = pOper->pObj;
+ SDnodeObj *pDnode = mnodeGetDnode(pNew->dnodeId);
+ if (pDnode != NULL && pNew != pDnode) {
+   memcpy(pDnode, pNew, pOper->rowSize);
+   free(pNew);
  }
+ mnodeDecDnodeRef(pDnode);

  return TSDB_CODE_SUCCESS;
}
......
@@ -72,8 +72,6 @@ typedef struct {
  void *  sync;
  void *  wal;
  SSyncCfg cfg;
- sem_t   sem;
- int32_t code;
  int32_t numOfTables;
  SSdbTable *tableList[SDB_TABLE_MAX];
  pthread_mutex_t mutex;
@@ -244,27 +242,36 @@ static void sdbNotifyRole(void *ahandle, int8_t role) {
  sdbUpdateMnodeRoles();
}

+FORCE_INLINE
static void sdbConfirmForward(void *ahandle, void *param, int32_t code) {
-  tsSdbObj.code = code;
-  sem_post(&tsSdbObj.sem);
-  sdbDebug("forward request confirmed, version:%" PRIu64 ", result:%s", (int64_t)param, tstrerror(code));
-}
-
-static int32_t sdbForwardToPeer(SWalHead *pHead) {
-  if (tsSdbObj.sync == NULL) return TSDB_CODE_SUCCESS;
-
-  int32_t code = syncForwardToPeer(tsSdbObj.sync, pHead, (void*)pHead->version, TAOS_QTYPE_RPC);
-  if (code > 0) {
-    sdbDebug("forward request is sent, version:%" PRIu64 ", code:%d", pHead->version, code);
-    sem_wait(&tsSdbObj.sem);
-    return tsSdbObj.code;
-  }
-  return code;
+  assert(param);
+  SSdbOper * pOper = param;
+  SMnodeMsg *pMsg = pOper->pMsg;
+  if (code <= 0) pOper->retCode = code;
+
+  int32_t processedCount = atomic_add_fetch_32(&pOper->processedCount, 1);
+  if (processedCount <= 1) {
+    if (pMsg != NULL) {
+      sdbDebug("app:%p:%p, waiting for confirm this operation, count:%d", pMsg->rpcMsg.ahandle, pMsg, processedCount);
+    }
+    return;
+  }
+
+  if (pMsg != NULL) {
+    sdbDebug("app:%p:%p, is confirmed and will do callback func", pMsg->rpcMsg.ahandle, pMsg);
+  }
+
+  if (pOper->cb != NULL) {
+    pOper->retCode = (*pOper->cb)(pMsg, pOper->retCode);
+  }
+
+  dnodeSendRpcMnodeWriteRsp(pMsg, pOper->retCode);
+  taosFreeQitem(pOper);
}

void sdbUpdateSync() {
  SSyncCfg syncCfg = {0};
  int32_t index = 0;
  SDMMnodeInfos *mnodes = dnodeGetMnodeInfos();
  for (int32_t i = 0; i < mnodes->nodeNum; ++i) {
@@ -298,7 +305,7 @@ void sdbUpdateSync() {
  }

  syncCfg.replica = index;
- syncCfg.quorum = (syncCfg.replica == 1) ? 1:2;
+ syncCfg.quorum = (syncCfg.replica == 1) ? 1 : 2;

  bool hasThisDnode = false;
  for (int32_t i = 0; i < syncCfg.replica; ++i) {

@@ -325,10 +332,10 @@ void sdbUpdateSync() {
  syncInfo.getWalInfo = sdbGetWalInfo;
  syncInfo.getFileInfo = sdbGetFileInfo;
  syncInfo.writeToCache = sdbWriteToQueue;
  syncInfo.confirmForward = sdbConfirmForward;
  syncInfo.notifyRole = sdbNotifyRole;
  tsSdbObj.cfg = syncCfg;

  if (tsSdbObj.sync) {
    syncReconfig(tsSdbObj.sync, &syncCfg);
  } else {
@@ -339,7 +346,6 @@ void sdbUpdateSync() {

int32_t sdbInit() {
  pthread_mutex_init(&tsSdbObj.mutex, NULL);
- sem_init(&tsSdbObj.sem, 0, 0);

  if (sdbInitWriteWorker() != 0) {
    return -1;

@@ -379,7 +385,6 @@ void sdbCleanUp() {
    tsSdbObj.wal = NULL;
  }

- sem_destroy(&tsSdbObj.sem);
  pthread_mutex_destroy(&tsSdbObj.mutex);
}
@@ -513,24 +518,22 @@ static int sdbWrite(void *param, void *data, int type) {
  assert(pTable != NULL);

  pthread_mutex_lock(&tsSdbObj.mutex);

  if (pHead->version == 0) {
    // assign version
    tsSdbObj.version++;
    pHead->version = tsSdbObj.version;
  } else {
    // for data from WAL or forward, version may be smaller
    if (pHead->version <= tsSdbObj.version) {
      pthread_mutex_unlock(&tsSdbObj.mutex);
-     if (type == TAOS_QTYPE_FWD && tsSdbObj.sync != NULL) {
-       sdbDebug("forward request is received, version:%" PRIu64 " confirm it", pHead->version);
-       syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
-     }
+     sdbDebug("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
+              pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
      return TSDB_CODE_SUCCESS;
    } else if (pHead->version != tsSdbObj.version + 1) {
      pthread_mutex_unlock(&tsSdbObj.mutex);
-     sdbError("table:%s, failed to restore %s record:%s from wal, version:%" PRId64 " too large, sdb version:%" PRId64,
-              pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version,
-              tsSdbObj.version);
+     sdbError("table:%s, failed to restore %s record:%s from source(%d), version:%" PRId64 " too large, sdb version:%" PRId64,
+              pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), type, pHead->version, tsSdbObj.version);
      return TSDB_CODE_MND_APP_ERROR;
    } else {
      tsSdbObj.version = pHead->version;
@@ -542,28 +545,36 @@ static int sdbWrite(void *param, void *data, int type) {
    pthread_mutex_unlock(&tsSdbObj.mutex);
    return code;
  }

- code = sdbForwardToPeer(pHead);
  pthread_mutex_unlock(&tsSdbObj.mutex);

  // from app, oper is created
  if (pOper != NULL) {
-   sdbTrace("record from app is disposed, table:%s action:%s record:%s version:%" PRIu64 " result:%s",
-            pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version,
-            tstrerror(code));
-   return code;
+   // forward to peers
+   pOper->processedCount = 0;
+   int32_t syncCode = syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC);
+   if (syncCode <= 0) pOper->processedCount = 1;
+
+   if (syncCode < 0) {
+     sdbError("table:%s, failed to forward request, result:%s action:%s record:%s version:%" PRId64, pTable->tableName,
+              tstrerror(syncCode), sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
+   } else if (syncCode > 0) {
+     sdbDebug("table:%s, forward request is sent, action:%s record:%s version:%" PRId64, pTable->tableName,
+              sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
+   } else {
+     sdbTrace("table:%s, no need to send fwd request, action:%s record:%s version:%" PRId64, pTable->tableName,
+              sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
+   }
+   return syncCode;
  }

- // from wal or forward msg, oper not created, should add into hash
- if (tsSdbObj.sync != NULL) {
-   sdbTrace("record from wal forward is disposed, table:%s action:%s record:%s version:%" PRIu64 " confirm it",
-            pTable->tableName, sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
-   syncConfirmForward(tsSdbObj.sync, pHead->version, code);
- } else {
-   sdbTrace("record from wal restore is disposed, table:%s action:%s record:%s version:%" PRIu64, pTable->tableName,
-            sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
- }
+ sdbDebug("table:%s, record from wal/fwd is disposed, action:%s record:%s version:%" PRId64, pTable->tableName,
+          sdbGetActionStr(action), sdbGetKeyStr(pTable, pHead->cont), pHead->version);
+
+ // even it is WAL/FWD, it shall be called to update version in sync
+ syncForwardToPeer(tsSdbObj.sync, pHead, pOper, TAOS_QTYPE_RPC);

+ // from wal or forward msg, oper not created, should add into hash
  if (action == SDB_ACTION_INSERT) {
    SSdbOper oper = {.rowSize = pHead->len, .rowData = pHead->cont, .table = pTable};
    code = (*pTable->decodeFp)(&oper);
@@ -627,7 +638,7 @@ int32_t sdbInsertRow(SSdbOper *pOper) {
  memcpy(pNewOper, pOper, sizeof(SSdbOper));

  if (pNewOper->pMsg != NULL) {
-   sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
+   sdbDebug("app:%p:%p, table:%s record:%p:%s, insert action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
             pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
  }

@@ -677,7 +688,7 @@ int32_t sdbDeleteRow(SSdbOper *pOper) {
  memcpy(pNewOper, pOper, sizeof(SSdbOper));

  if (pNewOper->pMsg != NULL) {
-   sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
+   sdbDebug("app:%p:%p, table:%s record:%p:%s, delete action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
             pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
  }

@@ -727,7 +738,7 @@ int32_t sdbUpdateRow(SSdbOper *pOper) {
  memcpy(pNewOper, pOper, sizeof(SSdbOper));

  if (pNewOper->pMsg != NULL) {
-   sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue, ", pNewOper->pMsg->rpcMsg.ahandle,
+   sdbDebug("app:%p:%p, table:%s record:%p:%s, update action is add to sdb queue", pNewOper->pMsg->rpcMsg.ahandle,
             pNewOper->pMsg, pTable->tableName, pOper->pObj, sdbGetKeyStrFromObj(pTable, pOper->pObj));
  }
@@ -943,20 +954,20 @@ static void *sdbWorkerFp(void *param) {
      taosGetQitem(tsSdbWriteQall, &type, &item);
      if (type == TAOS_QTYPE_RPC) {
        pOper = (SSdbOper *)item;
+       pOper->processedCount = 1;
        pHead = (void *)pOper + sizeof(SSdbOper) + SDB_SYNC_HACK;
-       if (pOper->pMsg != NULL) {
-         sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
-                  pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
-                  sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
-       }
      } else {
        pHead = (SWalHead *)item;
        pOper = NULL;
      }

+     if (pOper != NULL && pOper->pMsg != NULL) {
+       sdbDebug("app:%p:%p, table:%s record:%p:%s version:%" PRIu64 ", will be processed in sdb queue",
+                pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, ((SSdbTable *)pOper->table)->tableName, pOper->pObj,
+                sdbGetKeyStr(pOper->table, pHead->cont), pHead->version);
+     }

      int32_t code = sdbWrite(pOper, pHead, type);
-     if (pOper) pOper->retCode = code;
+     if (pOper && code <= 0) pOper->retCode = code;
    }

    walFsync(tsSdbObj.wal);
@@ -965,25 +976,17 @@ static void *sdbWorkerFp(void *param) {
    taosResetQitems(tsSdbWriteQall);
    for (int32_t i = 0; i < numOfMsgs; ++i) {
      taosGetQitem(tsSdbWriteQall, &type, &item);
      if (type == TAOS_QTYPE_RPC) {
        pOper = (SSdbOper *)item;
-       if (pOper != NULL && pOper->cb != NULL) {
-         sdbTrace("app:%p:%p, will do callback func, index:%d", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg, i);
-         pOper->retCode = (*pOper->cb)(pOper->pMsg, pOper->retCode);
-       }
-
-       if (pOper != NULL && pOper->pMsg != NULL) {
-         sdbTrace("app:%p:%p, msg is processed, result:%s", pOper->pMsg->rpcMsg.ahandle, pOper->pMsg,
-                  tstrerror(pOper->retCode));
-       }
-
-       if (pOper != NULL) {
-         sdbDecRef(pOper->table, pOper->pObj);
-       }
-
-       dnodeSendRpcMnodeWriteRsp(pOper->pMsg, pOper->retCode);
+       sdbDecRef(pOper->table, pOper->pObj);
+       sdbConfirmForward(NULL, pOper, pOper->retCode);
+     } else if (type == TAOS_QTYPE_FWD) {
+       syncConfirmForward(tsSdbObj.sync, pHead->version, TSDB_CODE_SUCCESS);
+       taosFreeQitem(item);
+     } else {
+       taosFreeQitem(item);
      }
-     taosFreeQitem(item);
    }
  }
......
@@ -783,9 +783,15 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {

static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) {
  SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
- if (pTable != NULL) {
-   mLInfo("app:%p:%p, stable:%s, is created in sdb, result:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
-          tstrerror(code));
+ assert(pTable);
+
+ if (code == TSDB_CODE_SUCCESS) {
+   mLInfo("stable:%s, is created in sdb", pTable->info.tableId);
+ } else {
+   mError("app:%p:%p, stable:%s, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
+          tstrerror(code));
+   SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsSuperTableSdb};
+   sdbDeleteRow(&desc);
  }

  return code;

@@ -1561,10 +1567,16 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
  SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
  assert(pTable);

- mDebug("app:%p:%p, table:%s, create table in id:%d, uid:%" PRIu64 ", result:%s", pMsg->rpcMsg.ahandle, pMsg,
-        pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
-
- if (code != TSDB_CODE_SUCCESS) return code;
+ if (code == TSDB_CODE_SUCCESS) {
+   mDebug("app:%p:%p, table:%s, create table in sid:%d, uid:%" PRIu64, pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
+          pTable->sid, pTable->uid);
+ } else {
+   mError("app:%p:%p, table:%s, failed to create table sid:%d, uid:%" PRIu64 ", reason:%s", pMsg->rpcMsg.ahandle, pMsg,
+          pTable->info.tableId, pTable->sid, pTable->uid, tstrerror(code));
+   SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pTable, .table = tsChildTableSdb};
+   sdbDeleteRow(&desc);
+   return code;
+ }

  SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont;
  SMDCreateTableMsg *pMDCreate = mnodeBuildCreateChildTableMsg(pCreate, pTable);
......
@@ -348,17 +348,23 @@ void *mnodeGetNextVgroup(void *pIter, SVgObj **pVgroup) {
}

static int32_t mnodeCreateVgroupCb(SMnodeMsg *pMsg, int32_t code) {
+ SVgObj *pVgroup = pMsg->pVgroup;
+ SDbObj *pDb = pMsg->pDb;
+ assert(pVgroup);
+
  if (code != TSDB_CODE_SUCCESS) {
-   pMsg->pVgroup = NULL;
+   mError("app:%p:%p, vgId:%d, failed to create in sdb, reason:%s", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
+          tstrerror(code));
+   SSdbOper desc = {.type = SDB_OPER_GLOBAL, .pObj = pVgroup, .table = tsVgroupSdb};
+   sdbDeleteRow(&desc);
    return code;
  }

- SVgObj *pVgroup = pMsg->pVgroup;
- SDbObj *pDb = pMsg->pDb;
- mInfo("vgId:%d, is created in mnode, db:%s replica:%d", pVgroup->vgId, pDb->name, pVgroup->numOfVnodes);
+ mInfo("app:%p:%p, vgId:%d, is created in mnode, db:%s replica:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId,
+       pDb->name, pVgroup->numOfVnodes);
  for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) {
-   mInfo("vgId:%d, index:%d, dnode:%d", pVgroup->vgId, i, pVgroup->vnodeGid[i].dnodeId);
+   mInfo("app:%p:%p, vgId:%d, index:%d, dnode:%d", pMsg->rpcMsg.ahandle, pMsg, pVgroup->vgId, i,
+         pVgroup->vnodeGid[i].dnodeId);
  }

  mnodeIncVgroupRef(pVgroup);
......
@@ -156,6 +156,7 @@ int main(int argc, char *argv[]) {
  }

  tInfo("client is initialized");
+ tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs);

  gettimeofday(&systemTime, NULL);
  startTime = systemTime.tv_sec*1000000 + systemTime.tv_usec;
......
@@ -24,23 +24,21 @@ int msgSize = 128;
int commit = 0;
int dataFd = -1;
void *qhandle = NULL;
+void *qset = NULL;

void processShellMsg() {
  static int num = 0;
  taos_qall qall;
  SRpcMsg *pRpcMsg, rpcMsg;
  int type;
+ void *pvnode;

  qall = taosAllocateQall();

  while (1) {
-   int numOfMsgs = taosReadAllQitems(qhandle, qall);
-   if (numOfMsgs <= 0) {
-     usleep(100);
-     continue;
-   }
+   int numOfMsgs = taosReadAllQitemsFromQset(qset, qall, &pvnode);
    tDebug("%d shell msgs are received", numOfMsgs);
+   if (numOfMsgs <= 0) break;

    for (int i=0; i<numOfMsgs; ++i) {
      taosGetQitem(qall, &type, (void **)&pRpcMsg);

@@ -82,15 +80,6 @@ void processShellMsg() {
  }

  taosFreeQall(qall);

- /*
-   SRpcIpSet ipSet;
-   ipSet.numOfIps = 1;
-   ipSet.index = 0;
-   ipSet.port = 7000;
-   ipSet.ip[0] = inet_addr("192.168.0.2");
-   rpcSendRedirectRsp(ahandle, &ipSet);
- */
}

@@ -189,6 +178,8 @@ int main(int argc, char *argv[]) {
  }

  qhandle = taosOpenQueue(sizeof(SRpcMsg));
+ qset = taosOpenQset();
+ taosAddIntoQset(qset, qhandle, NULL);

  processShellMsg();
......
This diff is collapsed.
@@ -38,4 +38,4 @@ export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/../../build/build/lib

# Now we are all let, and let's see if we can find a crash. Note we pass all params
-./crash_gen.py $@
+python3 ./crash_gen.py $@
sql connect
$db = db1
$stb = stb1
print =============== client1_0:
sql use $db
$tblNum = 1000
$i = 1
while $i < $tblNum
$tb = tb . $i
sql create table $tb using $stb tags ($i, 'abcd')
$i = $i + 1
endw
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode4 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/cfg.sh -n dnode2 -c walLevel -v 1
system sh/cfg.sh -n dnode3 -c walLevel -v 1
system sh/cfg.sh -n dnode4 -c walLevel -v 1
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode3 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode4 -c numOfTotalVnodes -v 4
system sh/cfg.sh -n dnode1 -c alternativeRole -v 0
system sh/cfg.sh -n dnode2 -c alternativeRole -v 0
system sh/cfg.sh -n dnode3 -c alternativeRole -v 0
system sh/cfg.sh -n dnode4 -c alternativeRole -v 0
system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode4 -c maxtablesPerVnode -v 1000
system sh/cfg.sh -n dnode1 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode2 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode3 -c arbitrator -v $arbitrator
system sh/cfg.sh -n dnode4 -c arbitrator -v $arbitrator
print ============== step0: start tarbitrator
system sh/exec_tarbitrator.sh -s start
print ============== step1: start dnode1/dnode2/dnode3
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
sleep 3000
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
sleep 3000
print ============== step2: create db1 with replica 3
$db = db1
print create database $db replica 3
#sql create database $db replica 3 maxTables $totalTableNum
sql create database $db replica 3
sql use $db
print ============== step3: create stable stb1
$stb = stb1
sql create table $stb (ts timestamp, c1 int, c2 int) tags(t1 int, t2 binary(8))
print ============== step4: start 10 client1/ 10 client2/ 10 client3/ 10 client4/ 1 client5
run_back unique/cluster/client1_0.sim
#run_back unique/cluster/client1_1.sim
#run_back unique/big_cluster/client1_2.sim
#run_back unique/big_cluster/client1_3.sim
#run_back unique/big_cluster/client1_4.sim
#run_back unique/big_cluster/client1_5.sim
#run_back unique/big_cluster/client1_6.sim
#run_back unique/big_cluster/client1_7.sim
#run_back unique/big_cluster/client1_8.sim
#run_back unique/big_cluster/client1_9.sim
print wait for a while to let clients start inserting data
sleep 5000
$loop_cnt = 0
loop_cluster_do:
print **** **** **** START loop cluster do **** **** **** ****
print ============== step5: start dnode4 and add into cluster, then wait dnode4 ready
system sh/exec.sh -n dnode4 -s start
sql create dnode $hostname4
wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready_0
endi
print ============== step6: stop and drop dnode1, then remove data dir of dnode1
system sh/exec.sh -n dnode1 -s stop -x SIGINT
$cnt = 0
wait_dnode1_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 4 then
sleep 2000
goto wait_dnode1_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode1Status = $data4_1
elif $loop_cnt == 1 then
$dnode1Status = $data4_5
elif $loop_cnt == 2 then
$dnode1Status = $data4_7
elif $loop_cnt == 3 then
$dnode1Status = $data4_9
else then
print **** **** **** END loop cluster do 1**** **** **** ****
return
endi
if $dnode1Status != offline then
sleep 2000
goto wait_dnode1_offline_0
endi
sql drop dnode $hostname1
system rm -rf ../../../sim/dnode1
print ============== step7: stop dnode2, because mnodes < 50%, the cluster does not provide services
system sh/exec.sh -n dnode2 -s stop -x SIGINT
sql show dnodes -x wait_dnode2_offline_0
if $rows != 3 then
sleep 2000
goto wait_dnode2_offline_0
endi
wait_dnode2_offline_0:
#$cnt = 0
#wait_dnode2_offline_0:
#$cnt = $cnt + 1
#if $cnt == 10 then
# return -1
#endi
#sql show dnodes -x wait_dnode2_offline_0
#if $rows != 3 then
# sleep 2000
# goto wait_dnode2_offline_0
#endi
#print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
#print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
#print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
#print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
#$dnode2Status = $data4_2
#$dnode3Status = $data4_3
#$dnode4Status = $data4_4
#
#if $dnode2Status != offline then
# sleep 2000
# goto wait_dnode1_offline_0
#endi
print ============== step8: restart dnode2, then wait sync end
system sh/exec.sh -n dnode2 -s start
$cnt = 0
wait_dnode2_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode2_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode2Status != ready then
sleep 2000
goto wait_dnode2_ready_0
endi
print ============== step9: stop dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s stop -x SIGINT
$cnt = 0
wait_dnode3_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode3Status != offline then
sleep 2000
goto wait_dnode3_offline_0
endi
print ============== step10: restart dnode3, then wait sync end
system sh/exec.sh -n dnode3 -s start
$cnt = 0
wait_dnode3_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode3_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $dnode3Status != ready then
sleep 2000
goto wait_dnode3_ready_0
endi
print ============== step11: stop dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s stop -x SIGINT
$cnt = 0
wait_dnode4_offline_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_offline_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_0
endi
print ============== step12: restart dnode4, then wait sync end
system sh/exec.sh -n dnode4 -s start
$cnt = 0
wait_dnode4_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != ready then
sleep 2000
goto wait_dnode4_ready_0
endi
print ============== step13: alter replica 2
sql alter database $db replica 2
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 2 then
print replica is not modified to 2, error!!!!!!
return
endi
print ============== step14: stop and drop dnode4, then remove data dir of dnode4
system sh/exec.sh -n dnode4 -s stop -x SIGINT
$cnt = 0
wait_dnode4_offline_1:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode4_offline_1
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
$dnode2Status = $data4_2
$dnode3Status = $data4_3
#$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode4Status = $data4_4
elif $loop_cnt == 1 then
$dnode4Status = $data4_6
elif $loop_cnt == 2 then
$dnode4Status = $data4_8
else then
print **** **** **** END loop cluster do 2**** **** **** ****
return
endi
if $dnode4Status != offline then
sleep 2000
goto wait_dnode4_offline_1
endi
sql drop dnode $hostname4
system rm -rf ../../../sim/dnode4
print ============== step15: alter replica 1
sql alter database $db replica 1
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 1 then
print replica is not modified to 1, error!!!!!!
return
endi
print ============== step16: alter replica 2
sql alter database $db replica 2
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 2 then
print replica is not modified to 2, error!!!!!!
return
endi
print ============== step17: start dnode1 and add into cluster, then wait dnode1 ready
system sh/exec.sh -n dnode1 -s start
sql create dnode $hostname1
wait_dnode1_ready_0:
$cnt = $cnt + 1
if $cnt == 10 then
return -1
endi
sql show dnodes
if $rows != 3 then
sleep 2000
goto wait_dnode1_ready_0
endi
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4
#$dnode1Status = $data4_1
$dnode2Status = $data4_2
$dnode3Status = $data4_3
$dnode4Status = $data4_4
if $loop_cnt == 0 then
$dnode1Status = $data4_1
elif $loop_cnt == 1 then
$dnode1Status = $data4_5
elif $loop_cnt == 2 then
$dnode1Status = $data4_7
elif $loop_cnt == 3 then
$dnode1Status = $data4_9
else then
print **** **** **** END loop cluster do 3**** **** **** ****
return
endi
if $dnode1Status != ready then
sleep 2000
goto wait_dnode1_ready_0
endi
print ============== step18: alter replica 3
sql alter database $db replica 3
sql show database
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1
if $data0_5 != 3 then
print replica is not modified to 3, error!!!!!!
return
endi
$loop_cnt = $loop_cnt + 1
goto loop_cluster_do