Commit 5a894114 authored by: S Shengliang Guan

Merge remote-tracking branch 'origin/3.0' into fix/tsim

@@ -152,7 +152,10 @@ void taosCfgDynamicOptions(const char *option, const char *value);
 void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary);
 struct SConfig *taosGetCfg();
-int32_t taosSetCfg(SConfig *pCfg, char* name);
+void taosSetAllDebugFlag(int32_t flag);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+int32_t taosSetCfg(SConfig *pCfg, char *name);
 #ifdef __cplusplus
 }
...
@@ -67,7 +67,6 @@ extern int32_t idxDebugFlag;
 int32_t taosInitLog(const char *logName, int32_t maxFiles);
 void taosCloseLog();
 void taosResetLog();
-void taosSetAllDebugFlag(int32_t flag);
 void taosDumpData(uint8_t *msg, int32_t len);
 void taosPrintLog(const char *flags, ELogLevel level, int32_t dflag, const char *format, ...)
...
@@ -1878,7 +1878,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks
 msgLen += sizeof(SSubmitBlk);
 int32_t dataLen = 0;
 for (int32_t j = 0; j < rows; ++j) { // iterate by row
-tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen)); // set row buf
+tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen)); // set row buf
 bool isStartKey = false;
 int32_t offset = 0;
 for (int32_t k = 0; k < colNum; ++k) { // iterate by column
...
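For readers of the hunk above: each row is serialized right after the rows already written for the block, so the write position has to advance by the accumulated dataLen as well as msgLen. A minimal sketch of that accumulation pattern, with hypothetical buffer and row-size names rather than the actual TDengine row encoder:

// Sketch only: each encoded row lands at base + headerLen + bytesWrittenSoFar.
#include <stdint.h>
#include <string.h>

#define POINTER_SHIFT(p, b) ((void *)((char *)(p) + (b)))

static void appendRows(void *pDataBuf, int32_t msgLen, const char *rows,
                       int32_t rowSize, int32_t rowCount) {
  int32_t dataLen = 0;                                         // row bytes written so far
  for (int32_t j = 0; j < rowCount; ++j) {                     // iterate by row
    void *rowBuf = POINTER_SHIFT(pDataBuf, msgLen + dataLen);  // not just msgLen
    memcpy(rowBuf, rows + (size_t)j * rowSize, rowSize);
    dataLen += rowSize;                                        // advance past this row
  }
}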
@@ -1143,6 +1143,10 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
 int32_t monitor = atoi(value);
 uInfo("monitor set from %d to %d", tsEnableMonitor, monitor);
 tsEnableMonitor = monitor;
+SConfigItem *pItem = cfgGetItem(tsCfg, "monitor");
+if (pItem != NULL) {
+  pItem->bval = tsEnableMonitor;
+}
 return;
 }
@@ -1166,8 +1170,39 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
 int32_t flag = atoi(value);
 uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
 *optionVars[d] = flag;
+taosSetDebugFlag(optionVars[d], optName, flag);
 return;
 }

 uError("failed to cfg dynamic option:%s value:%s", option, value);
 }
+
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+  SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
+  if (pItem != NULL) {
+    pItem->i32 = flagVal;
+  }
+  *pFlagPtr = flagVal;
+}
+
+void taosSetAllDebugFlag(int32_t flag) {
+  if (flag <= 0) return;
+
+  taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
+  taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
+  taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
+  taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
+  taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
+  taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
+  taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
+  taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
+  taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
+  taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
+  taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
+  taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
+  taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
+  taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
+  taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
+  taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
+  uInfo("all debug flag are set to %d", flag);
+}
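The two functions added above keep the live debug-flag globals and the corresponding SConfig items in step, so a dynamically changed flag is also visible through the config store. A rough standalone sketch of that pattern, using simplified stand-in types rather than the real SConfig/SConfigItem from tconfig.h:

// Sketch only: one call updates both the module-level flag variable and the
// config item that config queries would read; names here are hypothetical.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { const char *name; int32_t i32; } DemoCfgItem;

static int32_t     uDemoDebugFlag = 131;
static DemoCfgItem demoCfg[] = {{"uDebugFlag", 131}};

static DemoCfgItem *demoGetItem(const char *name) {
  for (size_t i = 0; i < sizeof(demoCfg) / sizeof(demoCfg[0]); ++i) {
    if (strcmp(demoCfg[i].name, name) == 0) return &demoCfg[i];
  }
  return NULL;  // tolerate a missing item, as the real lookup does
}

static void demoSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
  DemoCfgItem *pItem = demoGetItem(flagName);
  if (pItem != NULL) pItem->i32 = flagVal;  // keep the config store in sync
  *pFlagPtr = flagVal;                      // and the live flag variable
}

int main(void) {
  demoSetDebugFlag(&uDemoDebugFlag, "uDebugFlag", 143);
  printf("flag=%d item=%d\n", uDemoDebugFlag, demoGetItem("uDebugFlag")->i32);  // both 143
  return 0;
}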
@@ -874,7 +874,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
 }
 static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp) {
-mInfo("config rsp from dnode, app:%p", pRsp->info.ahandle);
+mInfo("config rsp from dnode");
 return 0;
 }
...
@@ -335,6 +335,7 @@ int32_t cfgSetItem(SConfig *pCfg, const char *name, const char *value, ECfgSrcTy
 }
 SConfigItem *cfgGetItem(SConfig *pCfg, const char *name) {
+if (pCfg == NULL) return NULL;
 int32_t size = taosArrayGetSize(pCfg->array);
 for (int32_t i = 0; i < size; ++i) {
 SConfigItem *pItem = taosArrayGet(pCfg->array, i);
...
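The one-line guard added to cfgGetItem is a defensive check for callers (such as the new taosSetDebugFlag path, where tsCfg is only an extern) that may presumably run before the global config object exists. A minimal sketch of the same pattern, with hypothetical names rather than the real SConfig types:

// Sketch only: return NULL instead of dereferencing a store that may not exist yet.
#include <stddef.h>
#include <string.h>

typedef struct { const char *name; int value; } DemoItem;
typedef struct { DemoItem *items; int count; } DemoConfig;

static DemoItem *demoFindItem(DemoConfig *pCfg, const char *name) {
  if (pCfg == NULL) return NULL;  // same guard as the hunk above
  for (int i = 0; i < pCfg->count; ++i) {
    if (strcmp(pCfg->items[i].name, name) == 0) return &pCfg->items[i];
  }
  return NULL;
}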
@@ -17,6 +17,7 @@
 #include "tlog.h"
 #include "os.h"
 #include "tutil.h"
+#include "tconfig.h"
 #define LOG_MAX_LINE_SIZE (1024)
 #define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)
@@ -62,6 +63,7 @@ typedef struct {
 TdThreadMutex logMutex;
 } SLogObj;
+extern SConfig *tsCfg;
 static int8_t tsLogInited = 0;
 static SLogObj tsLogObj = {.fileNum = 1};
 static int64_t tsAsyncLogLostLines = 0;
@@ -741,25 +743,3 @@ cmp_end:
 return ret;
 }
-
-void taosSetAllDebugFlag(int32_t flag) {
-  if (flag <= 0) return;
-
-  uDebugFlag = flag;
-  rpcDebugFlag = flag;
-  jniDebugFlag = flag;
-  qDebugFlag = flag;
-  cDebugFlag = flag;
-  dDebugFlag = flag;
-  vDebugFlag = flag;
-  mDebugFlag = flag;
-  wDebugFlag = flag;
-  sDebugFlag = flag;
-  tsdbDebugFlag = flag;
-  tqDebugFlag = flag;
-  fsDebugFlag = flag;
-  udfDebugFlag = flag;
-  smaDebugFlag = flag;
-  idxDebugFlag = flag;
-  uInfo("all debug flag are set to %d", flag);
-}
...
@@ -65,14 +65,14 @@ class TDTestCase:
 is_leader=True
 if count==1 and is_leader:
-tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
 else:
 tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
 for k ,v in self.dnode_list.items():
 if k == mnode_name:
 if v[3]==0:
-tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
 tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
@@ -115,7 +115,7 @@ class TDTestCase:
 for k , v in vgroups_infos.items():
 if len(v) ==1 and v[0]=="leader":
-tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
 else:
 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...
@@ -71,14 +71,14 @@ class TDTestCase:
 is_leader=True
 if count==1 and is_leader:
-tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
 else:
 tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
 for k ,v in self.dnode_list.items():
 if k == mnode_name:
 if v[3]==0:
-tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
 tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
@@ -121,7 +121,7 @@ class TDTestCase:
 for k , v in vgroups_infos.items():
 if len(v) ==1 and v[0]=="leader":
-tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
 else:
 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
@@ -129,7 +129,7 @@ class TDTestCase:
 drop_db_sql = "drop database if exists {}".format(dbname)
 create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
-tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
 tdSql.execute(drop_db_sql)
 tdSql.execute(create_db_sql)
 tdSql.execute("use {}".format(dbname))
@@ -155,7 +155,7 @@ class TDTestCase:
 ts = self.ts + 1000*row_num
 tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))
 def check_insert_status(self, dbname, tb_nums , row_nums):
 tdSql.execute("use {}".format(dbname))
...
@@ -71,14 +71,14 @@ class TDTestCase:
 is_leader=True
 if count==1 and is_leader:
-tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
 else:
 tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
 for k ,v in self.dnode_list.items():
 if k == mnode_name:
 if v[3]==0:
-tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
 tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
@@ -121,7 +121,7 @@ class TDTestCase:
 for k , v in vgroups_infos.items():
 if len(v) ==1 and v[0]=="leader":
-tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
 else:
 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
@@ -129,7 +129,7 @@ class TDTestCase:
 drop_db_sql = "drop database if exists {}".format(dbname)
 create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
-tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
 tdSql.execute(drop_db_sql)
 tdSql.execute(create_db_sql)
 tdSql.execute("use {}".format(dbname))
@@ -155,7 +155,7 @@ class TDTestCase:
 ts = self.ts + 1000*row_num
 tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== create database {} and insert rows execute end =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname))
 def check_insert_status(self, dbname, tb_nums , row_nums):
 tdSql.execute("use {}".format(dbname))
...
@@ -80,14 +80,14 @@ class TDTestCase:
 is_leader=True
 if count==1 and is_leader:
-tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
 else:
 tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
 for k ,v in self.dnode_list.items():
 if k == mnode_name:
 if v[3]==0:
-tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
 tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
@@ -130,7 +130,7 @@ class TDTestCase:
 for k , v in vgroups_infos.items():
 if len(v) ==1 and v[0]=="leader":
-tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
 else:
 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
@@ -138,7 +138,7 @@ class TDTestCase:
 drop_db_sql = "drop database if exists {}".format(dbname)
 create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
-tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
 tdSql.execute(drop_db_sql)
 tdSql.execute(create_db_sql)
 tdSql.execute("use {}".format(dbname))
@@ -161,7 +161,7 @@ class TDTestCase:
 ts = self.ts + self.ts_step*row_num
 tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
 def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
@@ -170,7 +170,7 @@ class TDTestCase:
 for row_num in range(append_nums):
 tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
 # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
+tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
 os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
 def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
@@ -197,7 +197,7 @@ class TDTestCase:
 time.sleep(0.1)
 tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
 status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
+tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
 count += 1
@@ -218,7 +218,7 @@ class TDTestCase:
 time.sleep(0.1)
 tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
 status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
-tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
+tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
 count += 1
 def _get_stop_dnode_id(self,dbname):
 tdSql.query("show {}.vgroups".format(dbname))
@@ -255,8 +255,8 @@ class TDTestCase:
 while status !="offline":
 time.sleep(0.1)
 status = _get_status()
-# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id))
 def wait_start_dnode_OK(self):
@@ -277,8 +277,8 @@ class TDTestCase:
 while status !="ready":
 time.sleep(0.1)
 status = _get_status()
-# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id))
 def _parse_datetime(self,timestr):
 try:
@@ -342,9 +342,9 @@ class TDTestCase:
 elif isinstance(data, str):
 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
 (sql, row, col, tdSql.queryResult[row][col], data))
-elif isinstance(data, datetime.date):
-tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-(sql, row, col, tdSql.queryResult[row][col], data))
+# elif isinstance(data, datetime.date):
+# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+# (sql, row, col, tdSql.queryResult[row][col], data))
 elif isinstance(data, float):
 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
 (sql, row, col, tdSql.queryResult[row][col], data))
@@ -389,15 +389,15 @@ class TDTestCase:
 # append rows of stablename when dnode stop
 tbname = "sub_{}_{}".format(stablename , 0)
-tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
 # create new stables
-tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 # begin start dnode
@@ -409,9 +409,9 @@ class TDTestCase:
 tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
 # create new stables again
-tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 def unsync_run_case(self):
@@ -447,7 +447,7 @@ class TDTestCase:
 self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
 self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
 # create sync threading and start it
 self.current_thread = _create_threading(db_name)
@@ -457,21 +457,21 @@ class TDTestCase:
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 tbname = "sub_{}_{}".format(stablename , 0)
-tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
 # create new stables
-tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 # create new stables again
-tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 self.current_thread.join()
...
@@ -80,14 +80,14 @@ class TDTestCase:
 is_leader=True
 if count==1 and is_leader:
-tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
 else:
 tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
 for k ,v in self.dnode_list.items():
 if k == mnode_name:
 if v[3]==0:
-tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
 tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
@@ -130,7 +130,7 @@ class TDTestCase:
 for k , v in vgroups_infos.items():
 if len(v) ==1 and v[0]=="leader":
-tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
 else:
 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
@@ -138,7 +138,7 @@ class TDTestCase:
 drop_db_sql = "drop database if exists {}".format(dbname)
 create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
-tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
 tdSql.execute(drop_db_sql)
 tdSql.execute(create_db_sql)
 tdSql.execute("use {}".format(dbname))
@@ -161,7 +161,7 @@ class TDTestCase:
 ts = self.ts + self.ts_step*row_num
 tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
 def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
@@ -170,7 +170,7 @@ class TDTestCase:
 for row_num in range(append_nums):
 tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
 # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
+tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
 os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
 def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
@@ -197,7 +197,7 @@ class TDTestCase:
 time.sleep(0.1)
 tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
 status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
+tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
 count += 1
@@ -218,7 +218,7 @@ class TDTestCase:
 time.sleep(0.1)
 tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
 status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
-tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
+tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
 count += 1
 def _get_stop_dnode_id(self,dbname):
@@ -256,8 +256,8 @@ class TDTestCase:
 while status !="offline":
 time.sleep(0.1)
 status = _get_status()
-# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
 def wait_start_dnode_OK(self):
@@ -278,8 +278,8 @@ class TDTestCase:
 while status !="ready":
 time.sleep(0.1)
 status = _get_status()
-# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
 def _parse_datetime(self,timestr):
 try:
@@ -343,9 +343,9 @@ class TDTestCase:
 elif isinstance(data, str):
 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
 (sql, row, col, tdSql.queryResult[row][col], data))
-elif isinstance(data, datetime.date):
-tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-(sql, row, col, tdSql.queryResult[row][col], data))
+# elif isinstance(data, datetime.date):
+# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+# (sql, row, col, tdSql.queryResult[row][col], data))
 elif isinstance(data, float):
 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
 (sql, row, col, tdSql.queryResult[row][col], data))
@@ -390,15 +390,15 @@ class TDTestCase:
 # append rows of stablename when dnode stop
 tbname = "sub_{}_{}".format(stablename , 0)
-tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
 # create new stables
-tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 # begin start dnode
@@ -410,9 +410,9 @@ class TDTestCase:
 tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
 # create new stables again
-tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 def unsync_run_case(self):
@@ -448,7 +448,7 @@ class TDTestCase:
 self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
 self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
 # create sync threading and start it
 self.current_thread = _create_threading(db_name)
@@ -458,21 +458,21 @@ class TDTestCase:
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 tbname = "sub_{}_{}".format(stablename , 0)
-tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
 # create new stables
-tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 # create new stables again
-tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 self.current_thread.join()
...
@@ -80,14 +80,14 @@ class TDTestCase:
 is_leader=True
 if count==1 and is_leader:
-tdLog.info("===== depoly cluster success with 1 mnode as leader =====")
+tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
 else:
 tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
 for k ,v in self.dnode_list.items():
 if k == mnode_name:
 if v[3]==0:
-tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
+tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
 tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
 else:
@@ -130,7 +130,7 @@ class TDTestCase:
 for k , v in vgroups_infos.items():
 if len(v) ==1 and v[0]=="leader":
-tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
+tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
 else:
 tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
@@ -138,7 +138,7 @@ class TDTestCase:
 drop_db_sql = "drop database if exists {}".format(dbname)
 create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
-tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname))
+tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
 tdSql.execute(drop_db_sql)
 tdSql.execute(create_db_sql)
 tdSql.execute("use {}".format(dbname))
@@ -161,7 +161,7 @@ class TDTestCase:
 ts = self.ts + self.ts_step*row_num
 tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename))
+tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
 def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
@@ -170,7 +170,7 @@ class TDTestCase:
 for row_num in range(append_nums):
 tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
 # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
-tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
+tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
 os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
 def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
@@ -197,7 +197,7 @@ class TDTestCase:
 time.sleep(0.1)
 tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
 status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
-tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
+tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
 count += 1
@@ -218,7 +218,7 @@ class TDTestCase:
 time.sleep(0.1)
 tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
 status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
-tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
+tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
 count += 1
 def _get_stop_dnode_id(self,dbname):
@@ -256,8 +256,8 @@ class TDTestCase:
 while status !="offline":
 time.sleep(0.1)
 status = _get_status()
-# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
+tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
 def wait_start_dnode_OK(self):
@@ -278,8 +278,8 @@ class TDTestCase:
 while status !="ready":
 time.sleep(0.1)
 status = _get_status()
-# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
+# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
-tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
+tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
 def _parse_datetime(self,timestr):
 try:
@@ -343,9 +343,9 @@ class TDTestCase:
 elif isinstance(data, str):
 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
 (sql, row, col, tdSql.queryResult[row][col], data))
-elif isinstance(data, datetime.date):
-tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
-(sql, row, col, tdSql.queryResult[row][col], data))
+# elif isinstance(data, datetime.date):
+# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
+# (sql, row, col, tdSql.queryResult[row][col], data))
 elif isinstance(data, float):
 tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
 (sql, row, col, tdSql.queryResult[row][col], data))
@@ -390,15 +390,15 @@ class TDTestCase:
 # append rows of stablename when dnode stop
 tbname = "sub_{}_{}".format(stablename , 0)
-tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
 # create new stables
-tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 # begin start dnode
@@ -410,9 +410,9 @@ class TDTestCase:
 tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
 # create new stables again
-tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 def unsync_run_case(self):
@@ -453,7 +453,7 @@ class TDTestCase:
 self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
 self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
+tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
 # create sync threading and start it
 self.current_thread = _create_threading(db_name)
@@ -463,21 +463,21 @@ class TDTestCase:
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 tbname = "sub_{}_{}".format(stablename , 0)
-tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
-tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
+tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
 self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
 # create new stables
-tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
 self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
 # create new stables again
-tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
 self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
-tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
+tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
self.current_thread.join() self.current_thread.join()
...@@ -493,7 +493,7 @@ class TDTestCase: ...@@ -493,7 +493,7 @@ class TDTestCase:
else: else:
continue continue
if port: if port:
tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id)) tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port) psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output( processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8") psCmd, shell=True).decode("utf-8")
......
...@@ -114,9 +114,9 @@ class TDTestCase: ...@@ -114,9 +114,9 @@ class TDTestCase:
elif isinstance(data, str): elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data)) (sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, datetime.date): # elif isinstance(data, datetime.date):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % # tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data)) # (sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float): elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data)) (sql, row, col, tdSql.queryResult[row][col], data))
...@@ -163,14 +163,14 @@ class TDTestCase: ...@@ -163,14 +163,14 @@ class TDTestCase:
is_leader=True is_leader=True
if count==1 and is_leader: if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====") tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else: else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items(): for k ,v in self.dnode_list.items():
if k == mnode_name: if k == mnode_name:
if v[3]==0: if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else: else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else: else:
...@@ -213,7 +213,7 @@ class TDTestCase: ...@@ -213,7 +213,7 @@ class TDTestCase:
for k , v in vgroups_infos.items(): for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader": if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else: else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...@@ -221,7 +221,7 @@ class TDTestCase: ...@@ -221,7 +221,7 @@ class TDTestCase:
drop_db_sql = "drop database if exists {}".format(dbname) drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.info(" ==== create database {} and insert rows begin =====".format(dbname)) tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql) tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql) tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname)) tdSql.execute("use {}".format(dbname))
...@@ -244,7 +244,7 @@ class TDTestCase: ...@@ -244,7 +244,7 @@ class TDTestCase:
ts = self.ts + self.ts_step*row_num ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== stable {} insert rows execute end =====".format(stablename)) tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
...@@ -253,7 +253,7 @@ class TDTestCase: ...@@ -253,7 +253,7 @@ class TDTestCase:
for row_num in range(append_nums): for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.info(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename)) tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
...@@ -280,7 +280,7 @@ class TDTestCase: ...@@ -280,7 +280,7 @@ class TDTestCase:
time.sleep(0.1) time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.info(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {} ====".format(count , dbname))
count += 1 count += 1
...@@ -301,7 +301,7 @@ class TDTestCase: ...@@ -301,7 +301,7 @@ class TDTestCase:
time.sleep(0.1) time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.info(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1 count += 1
def _get_stop_dnode_id(self,dbname): def _get_stop_dnode_id(self,dbname):
...@@ -340,8 +340,8 @@ class TDTestCase: ...@@ -340,8 +340,8 @@ class TDTestCase:
while status !="offline": while status !="offline":
time.sleep(0.1) time.sleep(0.1)
status = _get_status() status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self): def wait_start_dnode_OK(self):
...@@ -362,8 +362,8 @@ class TDTestCase: ...@@ -362,8 +362,8 @@ class TDTestCase:
while status !="ready": while status !="ready":
time.sleep(0.1) time.sleep(0.1)
status = _get_status() status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id)) tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def get_leader_infos(self ,dbname): def get_leader_infos(self ,dbname):
...@@ -390,10 +390,10 @@ class TDTestCase: ...@@ -390,10 +390,10 @@ class TDTestCase:
if role==self.stop_dnode_id: if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.info(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1])) tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
check_status = True check_status = True
elif vgroup_info[ind+1] !="offline": elif vgroup_info[ind+1] !="offline":
tdLog.info(" === dnode {} should be offline ".format(self.stop_dnode_id)) tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
else: else:
continue continue
break break
...@@ -410,7 +410,7 @@ class TDTestCase: ...@@ -410,7 +410,7 @@ class TDTestCase:
else: else:
continue continue
if port: if port:
tdLog.info(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id)) tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port) psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output( processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8") psCmd, shell=True).decode("utf-8")
...@@ -457,18 +457,18 @@ class TDTestCase: ...@@ -457,18 +457,18 @@ class TDTestCase:
if revote_status: if revote_status:
tbname = "sub_{}_{}".format(stablename , 0) tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 ) self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100) self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables # create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 ) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
else: else:
tdLog.info("===== leader of database {} is not ok , append rows fail =====".format(db_name)) tdLog.notice("===== leader of database {} is not ok , append rows fail =====".format(db_name))
# begin start dnode # begin start dnode
start = time.time() start = time.time()
...@@ -480,9 +480,9 @@ class TDTestCase: ...@@ -480,9 +480,9 @@ class TDTestCase:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
# create new stables again # create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
def unsync_run_case(self): def unsync_run_case(self):
...@@ -509,21 +509,21 @@ class TDTestCase: ...@@ -509,21 +509,21 @@ class TDTestCase:
revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos)
tbname = "sub_{}_{}".format(stablename , 0) tbname = "sub_{}_{}".format(stablename , 0)
tdLog.info(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 ) self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 )
tdLog.info(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) tdLog.notice(" ==== check append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id))
self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100) self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=100)
# create new stables # create new stables
tdLog.info(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) tdLog.notice(" ==== create new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 ) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb1' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
# create new stables again # create new stables again
tdLog.info(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id))
self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0)
...@@ -551,7 +551,7 @@ class TDTestCase: ...@@ -551,7 +551,7 @@ class TDTestCase:
self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 )
tdLog.info(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name)) tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name))
# create sync threading and start it # create sync threading and start it
self.current_thread = _create_threading(db_name) self.current_thread = _create_threading(db_name)
......
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import time
import socket
import subprocess
import threading
import pandas as pd           # mycheckData compares timestamps via pd.to_datetime
from util.common import *     # tdCom.newTdSql() is used by the wait_*_dnode_OK helpers
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.thread_list = []
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
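# _get_stop_dnode_id: scan `show <db>.vgroups` and return the dnode id listed just before a 'follower' role field, i.e. a dnode currently acting as follower for this database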
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
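# force_stop_dnode: look up the dnode's endpoint port via `show dnodes`, resolve the listening process id with netstat, then kill -9 it to simulate an abrupt crash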
def force_stop_dnode(self, dnode_id ):
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
if port:
tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
ps_kill_taosd = ''' kill -9 {} '''.format(processID)
# print(ps_kill_taosd)
os.system(ps_kill_taosd)
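# basic_query_task: run the same full-table scan through the taos CLI query_times times so queries stay in flight while dnodes restart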
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
count += 1
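# multi_thread_query_task: spawn thread_nums background threads that each run basic_query_task against the same stable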
def multi_thread_query_task(self, thread_nums ,dbname , stablename ):
for i in range(thread_nums):
task = threading.Thread(target = self.basic_query_task, args=(dbname ,stablename))
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
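# scenario: with 10 query threads running against stb1, repeatedly stop a follower dnode via stoptaosd and restart it, failing the case if any restart exceeds max_restart_time seconds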
def stop_follower_when_query_going(self):
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
tdDnodes[self.stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK()
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import time
import socket
import subprocess
import threading
import pandas as pd           # mycheckData compares timestamps via pd.to_datetime
from util.common import *     # tdCom.newTdSql() is used by the wait_*_dnode_OK helpers
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.thread_list = []
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='follower':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def force_stop_dnode(self, dnode_id ):
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
if port:
tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
ps_kill_taosd = ''' kill -9 {} '''.format(processID)
# print(ps_kill_taosd)
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
count += 1
def multi_thread_query_task(self, thread_nums ,dbname , stablename ):
for i in range(thread_nums):
task = threading.Thread(target = self.basic_query_task, args=(dbname ,stablename))
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
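# scenario: with query threads running, repeatedly kill -9 a follower dnode via force_stop_dnode and restart it, failing the case if any restart exceeds max_restart_time seconds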
def stop_follower_when_query_going(self):
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
self.force_stop_dnode(self.stop_dnode_id)
self.wait_stop_dnode_OK()
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import time
import socket
import subprocess
import threading
import pandas as pd           # mycheckData compares timestamps via pd.to_datetime
from util.common import *     # tdCom.newTdSql() is used by the wait_*_dnode_OK helpers
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.thread_list = []
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
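# _get_stop_dnode_id: this case targets the vgroup leader, returning the dnode id listed just before a 'leader' role field in `show <db>.vgroups`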
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos):
check_status = False
vote_act = set(set(after_leader_infos)-set(before_leader_infos))
if not vote_act:
print("=======before_revote_leader_infos ======\n" , before_leader_infos)
print("=======after_revote_leader_infos ======\n" , after_leader_infos)
tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
check_status = True
elif vgroup_info[ind+1] !="offline":
tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
else:
continue
break
return check_status
def wait_start_dnode_OK(self):
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, col)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def get_leader_infos(self ,dbname):
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
leader_infos = set()
for vgroup_info in vgroup_infos:
leader_infos.add(vgroup_info[3:-4])
return leader_infos
def force_stop_dnode(self, dnode_id ):
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
if port:
tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
ps_kill_taosd = ''' kill -9 {} '''.format(processID)
# print(ps_kill_taosd)
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
count += 1
def multi_thread_query_task(self, thread_nums ,dbname , stablename ):
for i in range(thread_nums):
task = threading.Thread(target = self.basic_query_task, args=(dbname ,stablename))
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
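# scenario: keep 10 background query threads running against stb1 while the dnode chosen by
# _get_stop_dnode_id is repeatedly stopped with stoptaosd and restarted; each round checks that
# leader re-election happens and that the restart stays under max_restart_time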
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
# get leader info before stop
before_leader_infos = self.get_leader_infos(self.db_name)
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
tdDnodes[self.stop_dnode_id-1].stoptaosd()
start = time.time()
# get leader info after stop
after_leader_infos = self.get_leader_infos(self.db_name)
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
while not revote_status:
after_leader_infos = self.get_leader_infos(self.db_name)
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
end = time.time()
time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
self.wait_stop_dnode_OK()
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
# author : wenzhouwww
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
import datetime
import inspect
import socket
import subprocess
import threading
# pandas is needed by mycheckData (pd.to_datetime) and tdCom is used to open extra
# connections in the wait/leader helpers; tdCom is assumed to be exported by util.common
import pandas as pd
from util.common import *
sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self,conn ,logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
self.mnode_list = {}
self.dnode_list = {}
self.ts = 1483200000000
self.ts_step =1000
self.db_name ='testdb'
self.replica = 3
self.vgroups = 1
self.tb_nums = 10
self.row_nums = 100
self.stop_dnode_id = None
self.loop_restart_times = 5
self.thread_list = []
self.max_restart_time = 10
self.try_check_times = 10
self.query_times = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def check_setup_cluster_status(self):
tdSql.query("show mnodes")
for mnode in tdSql.queryResult:
name = mnode[1]
info = mnode
self.mnode_list[name] = info
tdSql.query("show dnodes")
for dnode in tdSql.queryResult:
name = dnode[1]
info = dnode
self.dnode_list[name] = info
count = 0
is_leader = False
mnode_name = ''
for k,v in self.mnode_list.items():
count +=1
# only for 1 mnode
mnode_name = k
if v[2] =='leader':
is_leader=True
if count==1 and is_leader:
tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items():
if k == mnode_name:
if v[3]==0:
tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else:
continue
def create_database(self, dbname, replica_num ,vgroup_nums ):
drop_db_sql = "drop database if exists {}".format(dbname)
create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums)
tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname))
tdSql.execute(drop_db_sql)
tdSql.execute(create_db_sql)
tdSql.execute("use {}".format(dbname))
def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums):
tdSql.execute("use {}".format(dbname))
tdSql.execute(
'''create table {}
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''.format(stablename)
)
for i in range(tb_nums):
sub_tbname = "sub_{}_{}".format(stablename,i)
tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i))
# insert datas about new database
for row_num in range(row_nums):
ts = self.ts + self.ts_step*row_num
tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename))
def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ):
tdSql.execute("use {}".format(dbname))
for row_num in range(append_nums):
tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
# print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ")
tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename))
os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename))
def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows):
tdSql.execute("use {}".format(dbname))
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups; '".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select count(*) from {}.{}".format(dbname,stablename))
status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows)
tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname))
count += 1
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
count = 0
while not status_OK :
if count > self.try_check_times:
os.system("taos -s ' show {}.vgroups;'".format(dbname))
tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname))
break
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
while not tdSql.queryResult:
time.sleep(0.1)
tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename))
status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums)
tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname))
count += 1
def _get_stop_dnode_id(self,dbname):
tdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = tdSql.queryResult
for vgroup_info in vgroup_infos:
leader_infos = vgroup_info[3:-4]
# print(vgroup_info)
for ind ,role in enumerate(leader_infos):
if role =='leader':
# print(ind,leader_infos)
self.stop_dnode_id = leader_infos[ind-1]
break
return self.stop_dnode_id
def wait_stop_dnode_OK(self):
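# poll "show dnodes" on a fresh connection until the status of stop_dnode_id becomes "offline"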
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="offline":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id))
def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos):
check_status = False
vote_act = set(set(after_leader_infos)-set(before_leader_infos))
if not vote_act:
print("=======before_revote_leader_infos ======\n" , before_leader_infos)
print("=======after_revote_leader_infos ======\n" , after_leader_infos)
tdLog.info(" ===maybe revote not occured , there is no dnode offline ====")
else:
for vgroup_info in vote_act:
for ind , role in enumerate(vgroup_info):
if role==self.stop_dnode_id:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1]))
check_status = True
elif vgroup_info[ind+1] !="offline":
tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id))
else:
continue
break
return check_status
def wait_start_dnode_OK(self):
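# poll "show dnodes" on a fresh connection until the status of stop_dnode_id becomes "ready" again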
def _get_status():
newTdSql=tdCom.newTdSql()
status = ""
newTdSql.query("show dnodes")
dnode_infos = newTdSql.queryResult
for dnode_info in dnode_infos:
id = dnode_info[0]
dnode_status = dnode_info[4]
if id == self.stop_dnode_id:
status = dnode_status
break
return status
status = _get_status()
while status !="ready":
time.sleep(0.1)
status = _get_status()
# tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id))
def _parse_datetime(self,timestr):
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
pass
try:
return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S')
except ValueError:
pass
def mycheckRowCol(self, sql, row, col):
caller = inspect.getframeinfo(inspect.stack()[2][0])
if row < 0:
args = (caller.filename, caller.lineno, sql, row)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args)
if col < 0:
args = (caller.filename, caller.lineno, sql, col)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args)
if row > tdSql.queryRows:
args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows)
tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args)
if col > tdSql.queryCols:
args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols)
tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args)
def mycheckData(self, sql ,row, col, data):
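# non-exiting variant of tdSql.checkData: logs a mismatch and returns False instead of aborting,
# so callers can keep retrying while replicas catch up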
check_status = True
self.mycheckRowCol(sql ,row, col)
if tdSql.queryResult[row][col] != data:
if tdSql.cursor.istype(col, "TIMESTAMP"):
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
if tdSql.queryResult[row][col] == self._parse_datetime(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
if str(tdSql.queryResult[row][col]) == str(data):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(sql, row, col, tdSql.queryResult[row][col], data))
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data)
tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
check_status = False
if data is None:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, str):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
# elif isinstance(data, datetime.date):
# tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
# (sql, row, col, tdSql.queryResult[row][col], data))
elif isinstance(data, float):
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(sql, row, col, tdSql.queryResult[row][col], data))
else:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
(sql, row, col, tdSql.queryResult[row][col], data))
return check_status
def mycheckRows(self, sql, expectRows):
check_status = True
if len(tdSql.queryResult) == expectRows:
tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows))
return True
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows)
tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
check_status = False
return check_status
def get_leader_infos(self ,dbname):
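# snapshot the role columns of every vgroup as tuples, so leadership changes can later be
# detected with a simple set difference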
newTdSql=tdCom.newTdSql()
newTdSql.query("show {}.vgroups".format(dbname))
vgroup_infos = newTdSql.queryResult
leader_infos = set()
for vgroup_info in vgroup_infos:
leader_infos.add(vgroup_info[3:-4])
return leader_infos
def force_stop_dnode(self, dnode_id ):
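# simulate a crash: resolve the dnode's listening port from its endpoint in "show dnodes",
# locate the owning process with netstat and kill it with SIGKILL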
tdSql.query("show dnodes")
port = None
for dnode_info in tdSql.queryResult:
if dnode_id == dnode_info[0]:
port = dnode_info[1].split(":")[-1]
break
else:
continue
if port:
tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id))
psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
ps_kill_taosd = ''' kill -9 {} '''.format(processID)
# print(ps_kill_taosd)
os.system(ps_kill_taosd)
def basic_query_task(self,dbname ,stablename):
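# issue the same full-table query query_times times through the taos CLI so the cluster
# stays under read load while dnodes are being killed and restarted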
sql = "select * from {}.{} ;".format(dbname , stablename)
count = 0
while count < self.query_times:
os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
count += 1
def multi_thread_query_task(self, thread_nums ,dbname , stablename ):
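# start thread_nums threads that each run basic_query_task and return them so the caller can join later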
for i in range(thread_nums):
task = threading.Thread(target = self.basic_query_task, args=(dbname ,stablename))
self.thread_list.append(task)
for thread in self.thread_list:
thread.start()
return self.thread_list
def stop_follower_when_query_going(self):
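# scenario: keep 10 background query threads running against stb1 while the dnode chosen by
# _get_stop_dnode_id is killed with force_stop_dnode (kill -9); each round waits for leader
# re-election, restarts the dnode and checks the restart time against max_restart_time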
tdDnodes = cluster.dnodes
self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
# let query task start
self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
# force stop follower
for loop in range(self.loop_restart_times):
tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
# get leader info before stop
before_leader_infos = self.get_leader_infos(self.db_name)
self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
self.force_stop_dnode(self.stop_dnode_id)
start = time.time()
# get leader info after stop
after_leader_infos = self.get_leader_infos(self.db_name)
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
while not revote_status:
after_leader_infos = self.get_leader_infos(self.db_name)
revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
end = time.time()
time_cost = end - start
tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
self.wait_stop_dnode_OK()
start = time.time()
tdDnodes[self.stop_dnode_id-1].starttaosd()
self.wait_start_dnode_OK()
end = time.time()
time_cost = int(end-start)
if time_cost > self.max_restart_time:
tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
for thread in self.thread_list:
thread.join()
def run(self):
# basic check of cluster
self.check_setup_cluster_status()
self.stop_follower_when_query_going()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
...@@ -71,14 +71,14 @@ class TDTestCase: ...@@ -71,14 +71,14 @@ class TDTestCase:
is_leader=True is_leader=True
if count==1 and is_leader: if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====") tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else: else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items(): for k ,v in self.dnode_list.items():
if k == mnode_name: if k == mnode_name:
if v[3]==0: if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else: else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else: else:
...@@ -121,7 +121,7 @@ class TDTestCase: ...@@ -121,7 +121,7 @@ class TDTestCase:
for k , v in vgroups_infos.items(): for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader": if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else: else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...@@ -152,10 +152,10 @@ class TDTestCase: ...@@ -152,10 +152,10 @@ class TDTestCase:
time.sleep(0.1) time.sleep(0.1)
status = self.check_vgroups_init_done(dbname) status = self.check_vgroups_init_done(dbname)
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname)) # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time() end = time.time()
cost_time = end - start cost_time = end - start
tdLog.info(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) ) tdLog.notice(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
# os.system("taos -s 'show {}.vgroups;'".format(dbname)) # os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost: if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) ) tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
...@@ -165,28 +165,28 @@ class TDTestCase: ...@@ -165,28 +165,28 @@ class TDTestCase:
def test_init_vgroups_time_costs(self): def test_init_vgroups_time_costs(self):
tdLog.info(" ====start check time cost about vgroups vote leaders ==== ") tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.info(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost)) tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
# create database replica 3 vgroups 1 # create database replica 3 vgroups 1
db1 = 'db_1' db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1) create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
tdLog.info('=======database {} replica 3 vgroups 1 ======'.format(db1)) tdLog.notice('=======database {} replica 3 vgroups 1 ======'.format(db1))
tdSql.execute(create_db_replica_3_vgroups_1) tdSql.execute(create_db_replica_3_vgroups_1)
self.vote_leader_time_costs(db1) self.vote_leader_time_costs(db1)
# create database replica 3 vgroups 10 # create database replica 3 vgroups 10
db2 = 'db_2' db2 = 'db_2'
create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2) create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2)
tdLog.info('=======database {} replica 3 vgroups 10 ======'.format(db2)) tdLog.notice('=======database {} replica 3 vgroups 10 ======'.format(db2))
tdSql.execute(create_db_replica_3_vgroups_10) tdSql.execute(create_db_replica_3_vgroups_10)
self.vote_leader_time_costs(db2) self.vote_leader_time_costs(db2)
# create database replica 3 vgroups 100 # create database replica 3 vgroups 100
db3 = 'db_3' db3 = 'db_3'
create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3) create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3)
tdLog.info('=======database {} replica 3 vgroups 100 ======'.format(db3)) tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
tdSql.execute(create_db_replica_3_vgroups_100) tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3) self.vote_leader_time_costs(db3)
......
...@@ -74,14 +74,14 @@ class TDTestCase: ...@@ -74,14 +74,14 @@ class TDTestCase:
is_leader=True is_leader=True
if count==1 and is_leader: if count==1 and is_leader:
tdLog.info("===== depoly cluster success with 1 mnode as leader =====") tdLog.notice("===== depoly cluster success with 1 mnode as leader =====")
else: else:
tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====")
for k ,v in self.dnode_list.items(): for k ,v in self.dnode_list.items():
if k == mnode_name: if k == mnode_name:
if v[3]==0: if v[3]==0:
tdLog.info("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else: else:
tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3]))
else: else:
...@@ -124,7 +124,7 @@ class TDTestCase: ...@@ -124,7 +124,7 @@ class TDTestCase:
for k , v in vgroups_infos.items(): for k , v in vgroups_infos.items():
if len(v) ==1 and v[0]=="leader": if len(v) ==1 and v[0]=="leader":
tdLog.info(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k))
else: else:
tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k))
...@@ -148,7 +148,7 @@ class TDTestCase: ...@@ -148,7 +148,7 @@ class TDTestCase:
if ind%2==0: if ind%2==0:
if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline": if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline":
tdLog.info("====== dnode {} has offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode)) tdLog.notice("====== dnode {} has offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
elif role == stop_dnode_id : elif role == stop_dnode_id :
tdLog.exit("====== dnode {} has not offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode)) tdLog.exit("====== dnode {} has not offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
else: else:
...@@ -180,8 +180,8 @@ class TDTestCase: ...@@ -180,8 +180,8 @@ class TDTestCase:
while status !="offline": while status !="offline":
time.sleep(0.1) time.sleep(0.1)
status = _get_status() status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))
def wait_start_dnode_OK(self): def wait_start_dnode_OK(self):
...@@ -202,15 +202,15 @@ class TDTestCase: ...@@ -202,15 +202,15 @@ class TDTestCase:
while status !="ready": while status !="ready":
time.sleep(0.1) time.sleep(0.1)
status = _get_status() status = _get_status()
# tdLog.info("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
tdLog.info("==== stop_dnode has restart , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has restart , endpoint is {}".format(self.stop_dnode))
def random_stop_One_dnode(self): def random_stop_One_dnode(self):
self.stop_dnode = self._get_stop_dnode() self.stop_dnode = self._get_stop_dnode()
stop_dnode_id = self.dnode_list[self.stop_dnode][0] stop_dnode_id = self.dnode_list[self.stop_dnode][0]
tdLog.info(" ==== dnode {} will offline ,endpoints is {} ====".format(stop_dnode_id , self.stop_dnode)) tdLog.notice(" ==== dnode {} will offline ,endpoints is {} ====".format(stop_dnode_id , self.stop_dnode))
tdDnodes=cluster.dnodes tdDnodes=cluster.dnodes
tdDnodes[stop_dnode_id-1].stoptaosd() tdDnodes[stop_dnode_id-1].stoptaosd()
self.wait_stop_dnode_OK() self.wait_stop_dnode_OK()
...@@ -250,10 +250,10 @@ class TDTestCase: ...@@ -250,10 +250,10 @@ class TDTestCase:
time.sleep(0.1) time.sleep(0.1)
status = self.check_vgroups_init_done(dbname) status = self.check_vgroups_init_done(dbname)
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname)) # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time() end = time.time()
cost_time = end - start cost_time = end - start
tdLog.info(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) ) tdLog.notice(" ==== database %s vote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
# os.system("taos -s 'show {}.vgroups;'".format(dbname)) # os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost: if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) ) tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
...@@ -269,10 +269,10 @@ class TDTestCase: ...@@ -269,10 +269,10 @@ class TDTestCase:
time.sleep(0.1) time.sleep(0.1)
status = self.check_vgroups_revote_leader(dbname) status = self.check_vgroups_revote_leader(dbname)
# tdLog.info("=== database {} show vgroups vote the leader is in progress ===".format(dbname)) # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
end = time.time() end = time.time()
cost_time = end - start cost_time = end - start
tdLog.info(" ==== database %s revote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) ) tdLog.notice(" ==== database %s revote the leaders success , cost time is %.3f second ====="%(dbname,cost_time) )
# os.system("taos -s 'show {}.vgroups;'".format(dbname)) # os.system("taos -s 'show {}.vgroups;'".format(dbname))
if cost_time >= self.max_vote_time_cost: if cost_time >= self.max_vote_time_cost:
tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) ) tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
...@@ -306,7 +306,7 @@ class TDTestCase: ...@@ -306,7 +306,7 @@ class TDTestCase:
if role==self.dnode_list[self.stop_dnode][0]: if role==self.dnode_list[self.stop_dnode][0]:
if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
tdLog.info(" === revote leader ok , leader is {} now ====".format(list(vgroup_info).index("leader")-1)) tdLog.notice(" === revote leader ok , leader is {} now ====".format(list(vgroup_info).index("leader")-1))
elif vgroup_info[ind+1] !="offline": elif vgroup_info[ind+1] !="offline":
tdLog.exit(" === dnode {} should be offline ".format(self.stop_dnode)) tdLog.exit(" === dnode {} should be offline ".format(self.stop_dnode))
else: else:
...@@ -319,14 +319,14 @@ class TDTestCase: ...@@ -319,14 +319,14 @@ class TDTestCase:
self.Restart_stop_dnode() self.Restart_stop_dnode()
def test_init_vgroups_time_costs(self): def test_init_vgroups_time_costs(self):
tdLog.info(" ====start check time cost about vgroups vote leaders ==== ") tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
tdLog.info(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost)) tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
# create database replica 3 vgroups 1 # create database replica 3 vgroups 1
db1 = 'db_1' db1 = 'db_1'
create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1) create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
tdLog.info('=======database {} replica 3 vgroups 1 ======'.format(db1)) tdLog.notice('=======database {} replica 3 vgroups 1 ======'.format(db1))
tdSql.execute(create_db_replica_3_vgroups_1) tdSql.execute(create_db_replica_3_vgroups_1)
self.vote_leader_time_costs(db1) self.vote_leader_time_costs(db1)
self.exec_revote_action(db1) self.exec_revote_action(db1)
...@@ -334,7 +334,7 @@ class TDTestCase: ...@@ -334,7 +334,7 @@ class TDTestCase:
# create database replica 3 vgroups 10 # create database replica 3 vgroups 10
db2 = 'db_2' db2 = 'db_2'
create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2) create_db_replica_3_vgroups_10 = "create database {} replica 3 vgroups 10".format(db2)
tdLog.info('=======database {} replica 3 vgroups 10 ======'.format(db2)) tdLog.notice('=======database {} replica 3 vgroups 10 ======'.format(db2))
tdSql.execute(create_db_replica_3_vgroups_10) tdSql.execute(create_db_replica_3_vgroups_10)
self.vote_leader_time_costs(db2) self.vote_leader_time_costs(db2)
self.exec_revote_action(db2) self.exec_revote_action(db2)
...@@ -342,7 +342,7 @@ class TDTestCase: ...@@ -342,7 +342,7 @@ class TDTestCase:
# create database replica 3 vgroups 100 # create database replica 3 vgroups 100
db3 = 'db_3' db3 = 'db_3'
create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3) create_db_replica_3_vgroups_100 = "create database {} replica 3 vgroups 100".format(db3)
tdLog.info('=======database {} replica 3 vgroups 100 ======'.format(db3)) tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
tdSql.execute(create_db_replica_3_vgroups_100) tdSql.execute(create_db_replica_3_vgroups_100)
self.vote_leader_time_costs(db3) self.vote_leader_time_costs(db3)
self.exec_revote_action(db3) self.exec_revote_action(db3)
......
{
"filetype": "insert",
"cfgdir": "/etc/taos/",
"host": "localhost",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 10,
"create_table_thread_count": 10,
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 1000,
"num_of_records_per_req": 1000,
"databases": [
{
"dbinfo": {
"name": "db_2",
"drop": "no",
"vgroups": 1,
"replica": 3
},
"super_tables": [
{
"name": "stb1",
"childtable_count": 10,
"childtable_prefix": "sub_",
"auto_create_table": "yes",
"batch_create_tbl_num": 5000,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1000000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2015-05-01 00:00:00.000",
"sample_format": "csv",
"use_sample_ts": "no",
"tags_file": "",
"columns": [
{
"type": "INT",
"count": 1
},
{
"type": "TINYINT",
"count": 1
},
{
"type": "SMALLINT",
"count": 1
},
{
"type": "BIGINT",
"count": 1
},
{
"type": "UINT",
"count": 1
},
{
"type": "UTINYINT",
"count": 1
},
{
"type": "USMALLINT",
"count": 1
},
{
"type": "UBIGINT",
"count": 1
},
{
"type": "DOUBLE",
"count": 1
},
{
"type": "FLOAT",
"count": 1
},
{
"type": "BINARY",
"len": 40,
"count": 1
},
{
"type": "VARCHAR",
"len": 200,
"count": 1
},
{
"type": "nchar",
"len": 200,
"count": 1
}
],
"tags": [
{
"type": "INT",
"count": 1
},
{
"type": "BINARY",
"len": 100,
"count": 1
},
{
"type": "BOOL",
"count": 1
}
]
}
]
}
]
}
\ No newline at end of file
{
"filetype": "insert",
"cfgdir": "/etc/taos/",
"host": "localhost",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 1,
"create_table_thread_count": 1,
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 1000,
"num_of_records_per_req": 1000,
"databases": [
{
"dbinfo": {
"name": "db_1",
"drop": "no",
"vgroups": 1,
"replica": 3
},
"super_tables": [
{
"name": "stb1",
"childtable_count": 10,
"childtable_prefix": "sub_",
"auto_create_table": "yes",
"batch_create_tbl_num": 5000,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 10000,
"interlace_rows": 0,
"insert_interval": 0,
"max_sql_len": 1000000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 10,
"start_timestamp": "2015-05-01 00:00:00.000",
"sample_format": "csv",
"use_sample_ts": "no",
"tags_file": "",
"columns": [
{
"type": "INT",
"count": 1
},
{
"type": "TINYINT",
"count": 1
},
{
"type": "SMALLINT",
"count": 1
},
{
"type": "BIGINT",
"count": 1
},
{
"type": "UINT",
"count": 1
},
{
"type": "UTINYINT",
"count": 1
},
{
"type": "USMALLINT",
"count": 1
},
{
"type": "UBIGINT",
"count": 1
},
{
"type": "DOUBLE",
"count": 1
},
{
"type": "FLOAT",
"count": 1
},
{
"type": "BINARY",
"len": 40,
"count": 1
},
{
"type": "VARCHAR",
"len": 200,
"count": 1
},
{
"type": "nchar",
"len": 200,
"count": 1
}
],
"tags": [
{
"type": "INT",
"count": 1
},
{
"type": "BINARY",
"len": 100,
"count": 1
},
{
"type": "BOOL",
"count": 1
}
]
}
]
}
]
}
\ No newline at end of file
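The two JSON files above are taosBenchmark insert configurations (replica 3, vgroups 1, one super table with ten child tables each). A minimal sketch of how such a file could be driven from a test script, assuming it is saved as insert_db_1.json next to the script and that taosBenchmark accepts a JSON config via -f as in current releases:

import os
import subprocess

def run_benchmark(cfg_name="insert_db_1.json"):
    # hypothetical helper: feed the JSON config to taosBenchmark and fail loudly on a non-zero exit
    cfg_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), cfg_name)
    ret = subprocess.call(["taosBenchmark", "-f", cfg_path])
    if ret != 0:
        raise RuntimeError("taosBenchmark failed with exit code {}".format(ret))

if __name__ == "__main__":
    run_benchmark()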