Commit c1e9959b authored by H Hongze Cheng

Merge branch 'develop' into feature/2.0tsdb

@@ -237,9 +237,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
       rpcMsg->code = TSDB_CODE_NETWORK_UNAVAIL;
     } else {
       STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
-      if (rpcMsg->code == TSDB_CODE_NOT_ACTIVE_TABLE || rpcMsg->code == TSDB_CODE_INVALID_TABLE_ID ||
-          rpcMsg->code == TSDB_CODE_INVALID_VGROUP_ID || rpcMsg->code == TSDB_CODE_NOT_ACTIVE_VNODE ||
-          rpcMsg->code == TSDB_CODE_NETWORK_UNAVAIL || rpcMsg->code == TSDB_CODE_TABLE_ID_MISMATCH) {
+      if (rpcMsg->code == TSDB_CODE_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_INVALID_VGROUP_ID ||
+          rpcMsg->code == TSDB_CODE_NETWORK_UNAVAIL) {
         /*
          * not_active_table: 1. the virtual node may fail to create table, since the procedure of create table is asynchronized,
          *                      the virtual node may have not create table till now, so try again by using the new metermeta.
......
@@ -579,9 +579,9 @@ void taos_free_result_imp(TAOS_RES *res, int keepCmd) {
     if ((pCmd->command == TSDB_SQL_SELECT ||
          pCmd->command == TSDB_SQL_SHOW ||
          pCmd->command == TSDB_SQL_RETRIEVE ||
-         pCmd->command == TSDB_SQL_FETCH) &&
-        (pRes->code != TSDB_CODE_QUERY_CANCELLED && ((pCmd->command < TSDB_SQL_LOCAL && pRes->completed == false) ||
-         (pRes->code == TSDB_CODE_SUCCESS && pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)))) {
+         pCmd->command == TSDB_SQL_FETCH) && pRes->code == TSDB_CODE_SUCCESS &&
+        ((pCmd->command < TSDB_SQL_LOCAL && pRes->completed == false) ||
+         (pCmd->command == TSDB_SQL_SELECT && pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL))) {
       pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;

       tscTrace("%p send msg to free qhandle in vnode, code:%d, numOfRows:%d, command:%s", pSql, pRes->code, pRes->numOfRows,
......
@@ -1850,8 +1850,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
   STableMetaInfo* pFinalInfo = NULL;

   if (pPrevSql == NULL) {
-    STableMeta* pTableMeta = taosCacheAcquireByName(tscCacheHandle, name);
-    // todo handle error
+    STableMeta* pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMetaInfo->pTableMeta);  // get by name may failed due to the cache cleanup
     assert(pTableMeta != NULL);

     pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
   } else {  // transfer the ownership of pTableMeta to the newly create sql object.
......
@@ -22,7 +22,7 @@ extern "C" {

 int32_t dnodeInitMgmt();
 void    dnodeCleanupMgmt();
-void    dnodeDispatchToDnodeMgmt(SRpcMsg *rpcMsg);
+void    dnodeDispatchToMgmtQueue(SRpcMsg *rpcMsg);
 void  * dnodeGetVnode(int32_t vgId);
 int32_t dnodeGetVnodeStatus(void *pVnode);
......
@@ -22,6 +22,7 @@
 #include "ttimer.h"
 #include "tsdb.h"
 #include "twal.h"
+#include "tqueue.h"
 #include "tsync.h"
 #include "ttime.h"
 #include "ttimer.h"
@@ -42,10 +43,12 @@ void  * tsDnodeTmr = NULL;
 static void   * tsStatusTimer = NULL;
 static uint32_t tsRebootTime;

-static SRpcIpSet     tsDMnodeIpSetForPeer  = {0};
-static SRpcIpSet     tsDMnodeIpSetForShell = {0};
+static SRpcIpSet     tsDMnodeIpSet  = {0};
 static SDMMnodeInfos tsDMnodeInfos = {0};
 static SDMDnodeCfg   tsDnodeCfg = {0};
+static taos_qset     tsMgmtQset = NULL;
+static taos_queue    tsMgmtQueue = NULL;
+static pthread_t     tsQthread;

 static void   dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes);
 static bool   dnodeReadMnodeInfos();
@@ -55,6 +58,7 @@ static bool  dnodeReadDnodeCfg();
 static void    dnodeSaveDnodeCfg();
 static void    dnodeProcessStatusRsp(SRpcMsg *pMsg);
 static void    dnodeSendStatusMsg(void *handle, void *tmrId);
+static void   *dnodeProcessMgmtQueue(void *param);
 static int32_t  dnodeOpenVnodes();
 static void     dnodeCloseVnodes();
@@ -74,52 +78,64 @@ int32_t dnodeInitMgmt() {
   dnodeReadDnodeCfg();
   tsRebootTime = taosGetTimestampSec();

-  tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM");
-  if (tsDnodeTmr == NULL) {
-    dError("failed to init dnode timer");
-    return -1;
-  }
-
   if (!dnodeReadMnodeInfos()) {
-    memset(&tsDMnodeIpSetForPeer, 0, sizeof(SRpcIpSet));
-    memset(&tsDMnodeIpSetForShell, 0, sizeof(SRpcIpSet));
+    memset(&tsDMnodeIpSet, 0, sizeof(SRpcIpSet));
     memset(&tsDMnodeInfos, 0, sizeof(SDMMnodeInfos));

-    tsDMnodeIpSetForPeer.numOfIps = 1;
-    taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSetForPeer.fqdn[0], &tsDMnodeIpSetForPeer.port[0]);
-    tsDMnodeIpSetForPeer.port[0] += TSDB_PORT_DNODEDNODE;
-
-    tsDMnodeIpSetForShell.numOfIps = 1;
-    taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSetForShell.fqdn[0], &tsDMnodeIpSetForShell.port[0]);
-    tsDMnodeIpSetForShell.port[0] += TSDB_PORT_DNODESHELL;
+    tsDMnodeIpSet.numOfIps = 1;
+    taosGetFqdnPortFromEp(tsFirst, tsDMnodeIpSet.fqdn[0], &tsDMnodeIpSet.port[0]);

     if (strcmp(tsSecond, tsFirst) != 0) {
-      tsDMnodeIpSetForPeer.numOfIps = 2;
-      taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSetForPeer.fqdn[1], &tsDMnodeIpSetForPeer.port[1]);
-      tsDMnodeIpSetForPeer.port[1] += TSDB_PORT_DNODEDNODE;
-
-      tsDMnodeIpSetForShell.numOfIps = 2;
-      taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSetForShell.fqdn[1], &tsDMnodeIpSetForShell.port[1]);
-      tsDMnodeIpSetForShell.port[1] += TSDB_PORT_DNODESHELL;
+      tsDMnodeIpSet.numOfIps = 2;
+      taosGetFqdnPortFromEp(tsSecond, tsDMnodeIpSet.fqdn[1], &tsDMnodeIpSet.port[1]);
     }
   } else {
-    tsDMnodeIpSetForPeer.inUse = tsDMnodeInfos.inUse;
-    tsDMnodeIpSetForPeer.numOfIps = tsDMnodeInfos.nodeNum;
+    tsDMnodeIpSet.inUse = tsDMnodeInfos.inUse;
+    tsDMnodeIpSet.numOfIps = tsDMnodeInfos.nodeNum;
     for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
-      taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForPeer.fqdn[i], &tsDMnodeIpSetForPeer.port[i]);
-      tsDMnodeIpSetForPeer.port[i] += TSDB_PORT_DNODEDNODE;
+      taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSet.fqdn[i], &tsDMnodeIpSet.port[i]);
     }
-
-    tsDMnodeIpSetForShell.inUse = tsDMnodeInfos.inUse;
-    tsDMnodeIpSetForShell.numOfIps = tsDMnodeInfos.nodeNum;
-    for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
-      taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForShell.fqdn[i], &tsDMnodeIpSetForShell.port[i]);
-      tsDMnodeIpSetForShell.port[i] += TSDB_PORT_DNODESHELL;
-    }
   }

+  // create the queue and thread to handle the message
+  tsMgmtQset = taosOpenQset();
+  if (tsMgmtQset == NULL) {
+    dError("failed to create the mgmt queue set");
+    dnodeCleanupMgmt();
+    return -1;
+  }
+
+  tsMgmtQueue = taosOpenQueue();
+  if (tsMgmtQueue == NULL) {
+    dError("failed to create the mgmt queue");
+    dnodeCleanupMgmt();
+    return -1;
+  }
+
+  taosAddIntoQset(tsMgmtQset, tsMgmtQueue, NULL);
+
+  pthread_attr_t thAttr;
+  pthread_attr_init(&thAttr);
+  pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
+
+  int32_t code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL);
+  pthread_attr_destroy(&thAttr);
+  if (code != 0) {
+    dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno));
+    dnodeCleanupMgmt();
+    return -1;
+  }
+
-  int32_t code = dnodeOpenVnodes();
-  if (code != TSDB_CODE_SUCCESS) {
-    return -1;
-  }
+  code = dnodeOpenVnodes();
+  if (code != TSDB_CODE_SUCCESS) {
+    dnodeCleanupMgmt();
+    return -1;
+  }
+
+  tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM");
+  if (tsDnodeTmr == NULL) {
+    dError("failed to init dnode timer");
+    dnodeCleanupMgmt();
+    return -1;
+  }
@@ -142,22 +158,62 @@ void dnodeCleanupMgmt() {
   }

   dnodeCloseVnodes();
+
+  if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset);
+  if (tsQthread) pthread_join(tsQthread, NULL);
+
+  if (tsMgmtQueue) taosCloseQueue(tsMgmtQueue);
+  if (tsMgmtQset) taosCloseQset(tsMgmtQset);
+  tsMgmtQset = NULL;
+  tsMgmtQueue = NULL;
 }

-void dnodeDispatchToDnodeMgmt(SRpcMsg *pMsg) {
-  SRpcMsg rsp;
-
-  if (dnodeProcessMgmtMsgFp[pMsg->msgType]) {
-    rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg);
-  } else {
-    rsp.code = TSDB_CODE_MSG_NOT_PROCESSED;
-  }
-
-  rsp.handle = pMsg->handle;
-  rsp.pCont  = NULL;
-  rpcSendResponse(&rsp);
-
-  rpcFreeCont(pMsg->pCont);
-}
+void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) {
+  void *item;
+
+  item = taosAllocateQitem(sizeof(SRpcMsg));
+  if (item) {
+    memcpy(item, pMsg, sizeof(SRpcMsg));
+    taosWriteQitem(tsMgmtQueue, 1, item);
+  } else {
+    SRpcMsg rsp;
+    rsp.handle = pMsg->handle;
+    rsp.pCont  = NULL;
+    rsp.code   = TSDB_CODE_SERV_OUT_OF_MEMORY;
+    rpcSendResponse(&rsp);
+    rpcFreeCont(pMsg->pCont);
+  }
+}
+
+static void *dnodeProcessMgmtQueue(void *param) {
+  SRpcMsg *pMsg;
+  SRpcMsg  rsp;
+  int      type;
+  void *   handle;
+
+  while (1) {
+    if (taosReadQitemFromQset(tsMgmtQset, &type, (void **) &pMsg, &handle) == 0) {
+      dTrace("dnode mgmt got no message from qset, exit ...");
+      break;
+    }
+
+    dTrace("%p, msg:%s will be processed", pMsg->ahandle, taosMsg[pMsg->msgType]);
+    if (dnodeProcessMgmtMsgFp[pMsg->msgType]) {
+      rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg);
+    } else {
+      rsp.code = TSDB_CODE_MSG_NOT_PROCESSED;
+    }
+
+    rsp.handle = pMsg->handle;
+    rsp.pCont  = NULL;
+    rpcSendResponse(&rsp);
+
+    rpcFreeCont(pMsg->pCont);
+    taosFreeQitem(pMsg);
+  }
+
+  return NULL;
+}

 static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) {
@@ -284,22 +340,26 @@ static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) {
 }

 void dnodeUpdateMnodeIpSetForPeer(SRpcIpSet *pIpSet) {
-  dPrint("mnode IP list for peer is changed, numOfIps:%d inUse:%d", pIpSet->numOfIps, pIpSet->inUse);
+  dPrint("mnode IP list for is changed, numOfIps:%d inUse:%d", pIpSet->numOfIps, pIpSet->inUse);
   for (int i = 0; i < pIpSet->numOfIps; ++i) {
+    pIpSet->port[i] -= TSDB_PORT_DNODEDNODE;
     dPrint("mnode index:%d %s:%u", i, pIpSet->fqdn[i], pIpSet->port[i])
   }

-  tsDMnodeIpSetForPeer = *pIpSet;
+  tsDMnodeIpSet = *pIpSet;
 }

 void dnodeGetMnodeIpSetForPeer(void *ipSetRaw) {
   SRpcIpSet *ipSet = ipSetRaw;
-  *ipSet = tsDMnodeIpSetForPeer;
+  *ipSet = tsDMnodeIpSet;
+  for (int i=0; i<ipSet->numOfIps; ++i)
+    ipSet->port[i] += TSDB_PORT_DNODEDNODE;
 }

 void dnodeGetMnodeIpSetForShell(void *ipSetRaw) {
   SRpcIpSet *ipSet = ipSetRaw;
-  *ipSet = tsDMnodeIpSetForShell;
+  *ipSet = tsDMnodeIpSet;
 }

 static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
@@ -349,19 +409,10 @@ static void dnodeUpdateMnodeInfos(SDMMnodeInfos *pMnodes) {
     dPrint("mnode index:%d, %s", tsDMnodeInfos.nodeInfos[i].nodeId, tsDMnodeInfos.nodeInfos[i].nodeEp);
   }

-  tsDMnodeIpSetForPeer.inUse = tsDMnodeInfos.inUse;
-  tsDMnodeIpSetForPeer.numOfIps = tsDMnodeInfos.nodeNum;
-  for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
-    taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForPeer.fqdn[i], &tsDMnodeIpSetForPeer.port[i]);
-    tsDMnodeIpSetForPeer.port[i] += TSDB_PORT_DNODEDNODE;
-    dPrint("mnode index:%d, for peer %s %d", i, tsDMnodeIpSetForPeer.fqdn[i], tsDMnodeIpSetForPeer.port[i]);
-  }
-
-  tsDMnodeIpSetForShell.inUse = tsDMnodeInfos.inUse;
-  tsDMnodeIpSetForShell.numOfIps = tsDMnodeInfos.nodeNum;
+  tsDMnodeIpSet.inUse = tsDMnodeInfos.inUse;
+  tsDMnodeIpSet.numOfIps = tsDMnodeInfos.nodeNum;
   for (int32_t i = 0; i < tsDMnodeInfos.nodeNum; i++) {
-    taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSetForShell.fqdn[i], &tsDMnodeIpSetForShell.port[i]);
-    dPrint("mnode index:%d, for shell %s %d", i, tsDMnodeIpSetForShell.fqdn[i], tsDMnodeIpSetForShell.port[i]);
+    taosGetFqdnPortFromEp(tsDMnodeInfos.nodeInfos[i].nodeEp, tsDMnodeIpSet.fqdn[i], &tsDMnodeIpSet.port[i]);
   }

   dnodeSaveMnodeInfos();
@@ -487,7 +538,7 @@ static void dnodeSaveMnodeInfos() {
 }

 char *dnodeGetMnodeMasterEp() {
-  return tsDMnodeInfos.nodeInfos[tsDMnodeIpSetForPeer.inUse].nodeEp;
+  return tsDMnodeInfos.nodeInfos[tsDMnodeIpSet.inUse].nodeEp;
 }

 void* dnodeGetMnodeInfos() {
@@ -534,7 +585,9 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
     .msgType = TSDB_MSG_TYPE_DM_STATUS
   };

-  dnodeSendMsgToDnode(&tsDMnodeIpSetForPeer, &rpcMsg);
+  SRpcIpSet ipSet;
+  dnodeGetMnodeIpSetForPeer(&ipSet);
+  dnodeSendMsgToDnode(&ipSet, &rpcMsg);
 }

 static bool dnodeReadDnodeCfg() {
......
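The dnodeMgmt.c hunks above move management-message handling off the RPC thread and onto a single queue drained by a dedicated worker thread, created in dnodeInitMgmt and torn down in dnodeCleanupMgmt. Below is a minimal, self-contained sketch of that dispatch pattern using plain POSIX primitives; the fixed-size queue and the names mgmt_queue_push and mgmt_worker are illustrative stand-ins, not the taos_queue/taos_qset API used in the diff.

/* Illustrative sketch only: a bounded FIFO drained by one worker thread,
 * mirroring dnodeDispatchToMgmtQueue/dnodeProcessMgmtQueue above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QUEUE_CAP 64

typedef struct { int msgType; } Msg;          /* stand-in for SRpcMsg */

static Msg             queue[QUEUE_CAP];
static int             head = 0, tail = 0, count = 0;
static bool            stopped = false;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  notEmpty = PTHREAD_COND_INITIALIZER;

/* Producer side: what the RPC callback would do instead of processing inline. */
static bool mgmt_queue_push(Msg m) {
  pthread_mutex_lock(&lock);
  bool ok = (count < QUEUE_CAP);
  if (ok) { queue[tail] = m; tail = (tail + 1) % QUEUE_CAP; count++; }
  pthread_cond_signal(&notEmpty);
  pthread_mutex_unlock(&lock);
  return ok;                                  /* caller sends an "out of memory" reply on false */
}

/* Consumer side: serialized processing, like dnodeProcessMgmtQueue. */
static void *mgmt_worker(void *arg) {
  (void)arg;
  for (;;) {
    pthread_mutex_lock(&lock);
    while (count == 0 && !stopped) pthread_cond_wait(&notEmpty, &lock);
    if (count == 0 && stopped) { pthread_mutex_unlock(&lock); break; }
    Msg m = queue[head]; head = (head + 1) % QUEUE_CAP; count--;
    pthread_mutex_unlock(&lock);
    printf("processing msg type %d\n", m.msgType);   /* dispatch-table lookup goes here */
  }
  return NULL;
}

int main(void) {
  pthread_t th;
  pthread_create(&th, NULL, mgmt_worker, NULL);
  for (int i = 0; i < 5; ++i) mgmt_queue_push((Msg){.msgType = i});
  pthread_mutex_lock(&lock); stopped = true; pthread_cond_signal(&notEmpty); pthread_mutex_unlock(&lock);
  pthread_join(th, NULL);                      /* same shutdown order as dnodeCleanupMgmt: wake, then join */
  return 0;
}

Serializing all mgmt messages through one worker keeps the handlers effectively single-threaded, which is the usual motivation for this kind of queue.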
@@ -43,10 +43,10 @@ int32_t dnodeInitServer() {
   dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE]  = dnodeDispatchToVnodeWriteQueue;
   dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE]  = dnodeDispatchToVnodeWriteQueue;

-  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToDnodeMgmt;
-  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE]   = dnodeDispatchToDnodeMgmt;
-  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToDnodeMgmt;
-  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToDnodeMgmt;
+  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToMgmtQueue;
+  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE]   = dnodeDispatchToMgmtQueue;
+  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToMgmtQueue;
+  dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToMgmtQueue;

   dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = dnodeDispatchToMnodePeerQueue;
   dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = dnodeDispatchToMnodePeerQueue;
......
@@ -93,16 +93,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_NO_USER_FROM_CONN, 0, 0x0185, "can not get
 TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ALREADY_EXIST, 0, 0x0200, "table already exist")
 TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_ID, 0, 0x0201, "invalid table id")
 TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE_TYPE, 0, 0x0202, "invalid table typee")
-TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_TABLE, 0, 0x0203, "invalid table name")
-TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUPER_TABLE, 0, 0x0204, "no super table")  // operation only available for super table
-TAOS_DEFINE_ERROR(TSDB_CODE_NOT_ACTIVE_TABLE, 0, 0x0205, "not active table")
-TAOS_DEFINE_ERROR(TSDB_CODE_TABLE_ID_MISMATCH, 0, 0x0206, "table id mismatch")
-TAOS_DEFINE_ERROR(TSDB_CODE_TAG_ALREAY_EXIST, 0, 0x0207, "tag already exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_TAG_NOT_EXIST, 0, 0x0208, "tag not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_ALREAY_EXIST, 0, 0x0209, "field already exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_NOT_EXIST, 0, 0x020A, "field not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_COL_NAME_TOO_LONG, 0, 0x020B, "column name too long")
-TAOS_DEFINE_ERROR(TSDB_CODE_TOO_MANY_TAGS, 0, 0x020C, "too many tags")
+TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUPER_TABLE, 0, 0x0203, "no super table")  // operation only available for super table
+TAOS_DEFINE_ERROR(TSDB_CODE_TAG_ALREAY_EXIST, 0, 0x0204, "tag already exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_TAG_NOT_EXIST, 0, 0x0205, "tag not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_ALREAY_EXIST, 0, 0x0206, "field already exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_FIELD_NOT_EXIST, 0, 0x0207, "field not exist")
+TAOS_DEFINE_ERROR(TSDB_CODE_COL_NAME_TOO_LONG, 0, 0x0209, "column name too long")
+TAOS_DEFINE_ERROR(TSDB_CODE_TOO_MANY_TAGS, 0, 0x0209, "too many tags")

 // dnode & mnode
@@ -147,7 +144,6 @@ TAOS_DEFINE_ERROR(TSDB_CODE_GRANT_CPU_LIMITED, 0, 0x038F, "grant cpu li

 // server
 TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_VGROUP_ID, 0, 0x0400, "invalid vgroup id")
-TAOS_DEFINE_ERROR(TSDB_CODE_NOT_ACTIVE_VNODE, 0, 0x0401, "not active vnode")
 TAOS_DEFINE_ERROR(TSDB_CODE_VG_INIT_FAILED, 0, 0x0402, "vgroup init failed")
 TAOS_DEFINE_ERROR(TSDB_CODE_SERV_NO_DISKSPACE, 0, 0x0403, "server no diskspace")
 TAOS_DEFINE_ERROR(TSDB_CODE_SERV_OUT_OF_MEMORY, 0, 0x0404, "server out of memory")
......
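The TAOS_DEFINE_ERROR renumbering above removes the table/vnode codes that the rest of this commit folds into TSDB_CODE_INVALID_TABLE_ID and TSDB_CODE_INVALID_VGROUP_ID. Error tables of this shape are commonly generated with an X-macro, so that a single list yields both the enum constants and the code-to-message lookup. Below is a self-contained illustration of that pattern; the ERROR_LIST/demo_strerror names are hypothetical and TDengine's actual TAOS_DEFINE_ERROR/tstrerror plumbing may differ in detail.

/* Illustrative X-macro sketch; not TDengine's real error-table machinery. */
#include <stdint.h>
#include <stdio.h>

#define ERROR_LIST                                            \
  X(DEMO_CODE_INVALID_TABLE_ID,  0x0201, "invalid table id")  \
  X(DEMO_CODE_INVALID_VGROUP_ID, 0x0400, "invalid vgroup id")

/* First expansion: enum constants. */
#define X(name, code, msg) name = code,
enum { ERROR_LIST };
#undef X

/* Second expansion: code -> message lookup. */
static const char *demo_strerror(int32_t err) {
#define X(name, code, msg) if (err == (code)) return (msg);
  ERROR_LIST
#undef X
  return "unknown error";
}

int main(void) {
  printf("%s\n", demo_strerror(DEMO_CODE_INVALID_VGROUP_ID));  /* prints "invalid vgroup id" */
  return 0;
}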
@@ -220,6 +220,7 @@ typedef struct SAcctObj {

 typedef struct {
   int8_t   type;
+  int32_t  index;
   char     db[TSDB_DB_NAME_LEN + 1];
   void *   pIter;
   int16_t  numOfColumns;
@@ -228,7 +229,6 @@ typedef struct {
   int32_t  numOfReads;
   int16_t  offset[TSDB_MAX_COLUMNS];
   int16_t  bytes[TSDB_MAX_COLUMNS];
-  void *   signature;
   uint16_t payloadLen;
   char     payload[];
 } SShowObj;
......
@@ -27,7 +27,8 @@ void mnodeCleanupVgroups();

 SVgObj *mnodeGetVgroup(int32_t vgId);
 void    mnodeIncVgroupRef(SVgObj *pVgroup);
 void    mnodeDecVgroupRef(SVgObj *pVgroup);
-void    mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg);
+void    mnodeDropAllDbVgroups(SDbObj *pDropDb);
+void    mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb);
 void    mnodeDropAllDnodeVgroups(SDnodeObj *pDropDnode);
 void    mnodeUpdateAllDbVgroups(SDbObj *pAlterDb);
......
@@ -81,10 +81,10 @@ static int32_t mnodeDbActionDelete(SSdbOper *pOper) {
   SDbObj *pDb = pOper->pObj;
   SAcctObj *pAcct = mnodeGetAcct(pDb->acct);

-  mnodeDropDbFromAcct(pAcct, pDb);
   mnodeDropAllChildTables(pDb);
   mnodeDropAllSuperTables(pDb);
-  mnodeDropAllDbVgroups(pDb, false);
+  mnodeDropAllDbVgroups(pDb);
+  mnodeDropDbFromAcct(pAcct, pDb);
   mnodeDecAcctRef(pAcct);

   return TSDB_CODE_SUCCESS;
@@ -998,19 +998,7 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) {
     return code;
   }

-#if 1
-  mnodeDropAllDbVgroups(pMsg->pDb, true);
-#else
-  SVgObj *pVgroup = pMsg->pDb->pHead;
-  if (pVgroup != NULL) {
-    mPrint("vgId:%d, will be dropped", pVgroup->vgId);
-    SMnodeMsg *newMsg = mnodeCloneMsg(pMsg);
-    newMsg->ahandle = pVgroup;
-    newMsg->expected = pVgroup->numOfVnodes;
-    mnodeDropVgroup(pVgroup, newMsg);
-    return;
-  }
-#endif
+  mnodeSendDropAllDbVgroupsMsg(pMsg->pDb);

   mTrace("db:%s, all vgroups is dropped", pMsg->pDb->name);
   return mnodeDropDb(pMsg);
......
@@ -47,13 +47,14 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *mnodeMsg);
 static int32_t mnodeProcessUseMsg(SMnodeMsg *mnodeMsg);

 static void  mnodeFreeShowObj(void *data);
-static bool  mnodeCheckShowObj(SShowObj *pShow);
+static bool  mnodeAccquireShowObj(SShowObj *pShow);
 static bool  mnodeCheckShowFinished(SShowObj *pShow);
-static void *mnodeSaveShowObj(SShowObj *pShow, int32_t size);
-static void  mnodeCleanupShowObj(void *pShow, bool forceRemove);
+static void *mnodePutShowObj(SShowObj *pShow, int32_t size);
+static void  mnodeReleaseShowObj(void *pShow, bool forceRemove);

 extern void *tsMnodeTmr;
-static void *tsQhandleCache = NULL;
+static void *tsMnodeShowCache = NULL;
+static int32_t tsShowObjIndex = 0;
 static SShowMetaFp     tsMnodeShowMetaFp[TSDB_MGMT_TABLE_MAX]     = {0};
 static SShowRetrieveFp tsMnodeShowRetrieveFp[TSDB_MGMT_TABLE_MAX] = {0};

@@ -64,14 +65,15 @@ int32_t mnodeInitShow() {
   mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_CONNECT, mnodeProcessConnectMsg);
   mnodeAddReadMsgHandle(TSDB_MSG_TYPE_CM_USE_DB, mnodeProcessUseMsg);

-  tsQhandleCache = taosCacheInitWithCb(tsMnodeTmr, 10, mnodeFreeShowObj);
+  tsMnodeShowCache = taosCacheInitWithCb(tsMnodeTmr, 10, mnodeFreeShowObj);
   return 0;
 }

 void mnodeCleanUpShow() {
-  if (tsQhandleCache != NULL) {
-    taosCacheCleanup(tsQhandleCache);
-    tsQhandleCache = NULL;
+  if (tsMnodeShowCache != NULL) {
+    mPrint("show cache is cleanup");
+    taosCacheCleanup(tsMnodeShowCache);
+    tsMnodeShowCache = NULL;
   }
 }
@@ -118,13 +120,12 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
   int32_t showObjSize = sizeof(SShowObj) + htons(pShowMsg->payloadLen);
   SShowObj *pShow = (SShowObj *) calloc(1, showObjSize);
-  pShow->signature  = pShow;
   pShow->type       = pShowMsg->type;
   pShow->payloadLen = htons(pShowMsg->payloadLen);
   strcpy(pShow->db, pShowMsg->db);
   memcpy(pShow->payload, pShowMsg->payload, pShow->payloadLen);

-  pShow = mnodeSaveShowObj(pShow, showObjSize);
+  pShow = mnodePutShowObj(pShow, showObjSize);
   if (pShow == NULL) {
     return TSDB_CODE_SERV_OUT_OF_MEMORY;
   }
@@ -132,21 +133,22 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
   int32_t size = sizeof(SCMShowRsp) + sizeof(SSchema) * TSDB_MAX_COLUMNS + TSDB_EXTRA_PAYLOAD_SIZE;
   SCMShowRsp *pShowRsp = rpcMallocCont(size);
   if (pShowRsp == NULL) {
-    mnodeFreeShowObj(pShow);
+    mnodeReleaseShowObj(pShow, true);
     return TSDB_CODE_SERV_OUT_OF_MEMORY;
   }
   pShowRsp->qhandle = htobe64((uint64_t) pShow);

-  mTrace("show:%p, type:%s, start to get meta", pShow, mnodeGetShowType(pShowMsg->type));
+  mTrace("%p, show type:%s, start to get meta", pShow, mnodeGetShowType(pShowMsg->type));
   int32_t code = (*tsMnodeShowMetaFp[pShowMsg->type])(&pShowRsp->tableMeta, pShow, pMsg->rpcMsg.handle);
   if (code == 0) {
     pMsg->rpcRsp.rsp = pShowRsp;
     pMsg->rpcRsp.len = sizeof(SCMShowRsp) + sizeof(SSchema) * pShow->numOfColumns;
+    mnodeReleaseShowObj(pShow, false);
     return TSDB_CODE_SUCCESS;
   } else {
-    mError("show:%p, type:%s, failed to get meta, reason:%s", pShow, mnodeGetShowType(pShowMsg->type), tstrerror(code));
+    mError("%p, show type:%s, failed to get meta, reason:%s", pShow, mnodeGetShowType(pShowMsg->type), tstrerror(code));
     rpcFreeCont(pShowRsp);
-    mnodeCleanupShowObj(pShow, true);
+    mnodeReleaseShowObj(pShow, true);
     return code;
   }
 }
@@ -159,22 +161,20 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
   pRetrieve->qhandle = htobe64(pRetrieve->qhandle);

   SShowObj *pShow = (SShowObj *)pRetrieve->qhandle;
-  mTrace("show:%p, type:%s, retrieve data", pShow, mnodeGetShowType(pShow->type));
+  mTrace("%p, show type:%s, retrieve data", pShow, mnodeGetShowType(pShow->type));

   /*
    * in case of server restart, apps may hold qhandle created by server before
    * restart, which is actually invalid, therefore, signature check is required.
    */
-  if (!mnodeCheckShowObj(pShow)) {
-    mError("retrieve:%p, qhandle:%p is invalid", pRetrieve, pShow);
+  if (!mnodeAccquireShowObj(pShow)) {
+    mError("%p, show is invalid", pShow);
     return TSDB_CODE_INVALID_QHANDLE;
   }

   if (mnodeCheckShowFinished(pShow)) {
-    mTrace("retrieve:%p, qhandle:%p already read finished, numOfReads:%d numOfRows:%d", pRetrieve, pShow, pShow->numOfReads, pShow->numOfRows);
+    mTrace("%p, show is already read finished, numOfReads:%d numOfRows:%d", pShow, pShow->numOfReads, pShow->numOfRows);
     pShow->numOfReads = pShow->numOfRows;
-    //mnodeCleanupShowObj(pShow, true);
-    //return TSDB_CODE_SUCCESS;
   }

   if ((pRetrieve->free & TSDB_QUERY_TYPE_FREE_RESOURCE) != TSDB_QUERY_TYPE_FREE_RESOURCE) {
@@ -200,7 +200,7 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {

   if (rowsRead < 0) {
     rpcFreeCont(pRsp);
-    mnodeCleanupShowObj(pShow, false);
+    mnodeReleaseShowObj(pShow, false);
     assert(false);
     return TSDB_CODE_ACTION_IN_PROGRESS;
   }
@@ -211,12 +211,13 @@ static int32_t mnodeProcessRetrieveMsg(SMnodeMsg *pMsg) {
   pMsg->rpcRsp.rsp = pRsp;
   pMsg->rpcRsp.len = size;

-  if (rowsToRead == 0) {
-    mnodeCleanupShowObj(pShow, true);
+  if (rowsToRead == 0 || rowsRead == rowsToRead) {
+    pRsp->completed = 1;
+    mnodeReleaseShowObj(pShow, true);
   } else {
-    mnodeCleanupShowObj(pShow, false);
+    mnodeReleaseShowObj(pShow, false);
   }

   return TSDB_CODE_SUCCESS;
 }
@@ -318,24 +319,29 @@ static bool mnodeCheckShowFinished(SShowObj *pShow) {
   return false;
 }

-static bool mnodeCheckShowObj(SShowObj *pShow) {
-  SShowObj *pSaved = taosCacheAcquireByData(tsQhandleCache, pShow);
+static bool mnodeAccquireShowObj(SShowObj *pShow) {
+  char key[10];
+  sprintf(key, "%d", pShow->index);
+
+  SShowObj *pSaved = taosCacheAcquireByName(tsMnodeShowCache, key);
   if (pSaved == pShow) {
+    mTrace("%p, show is accquired from cache", pShow);
     return true;
   } else {
+    mTrace("show:%p, is already released", pShow);
     return false;
   }
 }

-static void *mnodeSaveShowObj(SShowObj *pShow, int32_t size) {
-  if (tsQhandleCache != NULL) {
-    char key[24];
-    sprintf(key, "show:%p", pShow);
-    SShowObj *newQhandle = taosCachePut(tsQhandleCache, key, pShow, size, 60);
+static void *mnodePutShowObj(SShowObj *pShow, int32_t size) {
+  if (tsMnodeShowCache != NULL) {
+    char key[10];
+    pShow->index = atomic_add_fetch_32(&tsShowObjIndex, 1);
+    sprintf(key, "%d", pShow->index);
+
+    SShowObj *newQhandle = taosCachePut(tsMnodeShowCache, key, pShow, size, 60);
     free(pShow);
-    mTrace("show:%p, is saved", newQhandle);
+    mTrace("%p, show is put into cache", newQhandle);
     return newQhandle;
   }

@@ -345,10 +351,10 @@ static void *mnodeSaveShowObj(SShowObj *pShow, int32_t size) {
 static void mnodeFreeShowObj(void *data) {
   SShowObj *pShow = data;
   sdbFreeIter(pShow->pIter);
-  mTrace("show:%p, is destroyed", pShow);
+  mTrace("%p, show is destroyed", pShow);
 }

-static void mnodeCleanupShowObj(void *pShow, bool forceRemove) {
-  mTrace("show:%p, is released, force:%s", pShow, forceRemove ? "true" : "false");
-  taosCacheRelease(tsQhandleCache, &pShow, forceRemove);
+static void mnodeReleaseShowObj(void *pShow, bool forceRemove) {
+  mTrace("%p, show is released, force:%s", pShow, forceRemove ? "true" : "false");
+  taosCacheRelease(tsMnodeShowCache, &pShow, forceRemove);
 }
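The mnodeShow.c hunks above stop keying the show-object cache by the object's own pointer and instead assign each SShowObj a monotonically increasing index that is formatted into its cache key, so a stale qhandle held across a restart fails the by-name lookup instead of matching a recycled pointer. Below is a small sketch of that keying scheme; the fixed-size table and the cache_put/cache_acquire names are illustrative, not the taosCache API.

/* Illustrative sketch only: key objects by a generated index, not by pointer. */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

typedef struct { int index; char payload[32]; } ShowObj;

#define SLOTS 16
static struct { char key[16]; ShowObj *obj; } table[SLOTS];  /* toy stand-in for the cache */
static atomic_int showObjIndex = 0;

static void cache_put(ShowObj *obj) {
  obj->index = atomic_fetch_add(&showObjIndex, 1) + 1;       /* add-then-fetch, like atomic_add_fetch_32 */
  char key[16];
  snprintf(key, sizeof(key), "%d", obj->index);
  int slot = obj->index % SLOTS;
  strcpy(table[slot].key, key);
  table[slot].obj = obj;
}

static ShowObj *cache_acquire(int index) {
  char key[16];
  snprintf(key, sizeof(key), "%d", index);
  int slot = index % SLOTS;
  /* by-name lookup: a stale or recycled handle simply misses */
  if (table[slot].obj != NULL && strcmp(table[slot].key, key) == 0) return table[slot].obj;
  return NULL;
}

int main(void) {
  ShowObj a = {0};
  cache_put(&a);
  printf("lookup %d -> %s\n", a.index, cache_acquire(a.index) ? "hit" : "miss");
  printf("lookup %d -> %s\n", a.index + 7, cache_acquire(a.index + 7) ? "hit" : "miss");
  return 0;
}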
@@ -148,37 +148,30 @@ static int32_t mnodeChildTableActionDelete(SSdbOper *pOper) {
     return TSDB_CODE_INVALID_VGROUP_ID;
   }

-  SVgObj *pVgroup = mnodeGetVgroup(pTable->vgId);
-  if (pVgroup == NULL) {
-    return TSDB_CODE_INVALID_VGROUP_ID;
-  }
-  mnodeDecVgroupRef(pVgroup);
-
-  SDbObj *pDb = mnodeGetDb(pVgroup->dbName);
-  if (pDb == NULL) {
-    mError("ctable:%s, vgId:%d not in DB:%s", pTable->info.tableId, pVgroup->vgId, pVgroup->dbName);
-    return TSDB_CODE_INVALID_DB;
-  }
-  mnodeDecDbRef(pDb);
-
-  SAcctObj *pAcct = mnodeGetAcct(pDb->acct);
-  if (pAcct == NULL) {
-    mError("ctable:%s, acct:%s not exists", pTable->info.tableId, pDb->acct);
-    return TSDB_CODE_INVALID_ACCT;
-  }
-  mnodeDecAcctRef(pAcct);
+  SVgObj *pVgroup = NULL;
+  SDbObj *pDb = NULL;
+  SAcctObj *pAcct = NULL;
+
+  pVgroup = mnodeGetVgroup(pTable->vgId);
+  if (pVgroup != NULL) pDb = mnodeGetDb(pVgroup->dbName);
+  if (pDb != NULL) pAcct = mnodeGetAcct(pDb->acct);

   if (pTable->info.type == TSDB_CHILD_TABLE) {
     grantRestore(TSDB_GRANT_TIMESERIES, pTable->superTable->numOfColumns - 1);
-    pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
+    if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->superTable->numOfColumns - 1);
     mnodeRemoveTableFromStable(pTable->superTable, pTable);
     mnodeDecTableRef(pTable->superTable);
   } else {
     grantRestore(TSDB_GRANT_TIMESERIES, pTable->numOfColumns - 1);
-    pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
+    if (pAcct != NULL) pAcct->acctInfo.numOfTimeSeries -= (pTable->numOfColumns - 1);
   }
-  mnodeRemoveTableFromDb(pDb);
-  mnodeRemoveTableFromVgroup(pVgroup, pTable);
+
+  if (pDb != NULL) mnodeRemoveTableFromDb(pDb);
+  if (pVgroup != NULL) mnodeRemoveTableFromVgroup(pVgroup, pTable);
+
+  mnodeDecVgroupRef(pVgroup);
+  mnodeDecDbRef(pDb);
+  mnodeDecAcctRef(pAcct);

   return TSDB_CODE_SUCCESS;
 }
@@ -693,10 +686,10 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) {
   }

   if (pCreate->numOfTags != 0) {
-    mTrace("table:%s, create msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
+    mTrace("table:%s, create stable msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
     return mnodeProcessCreateSuperTableMsg(pMsg);
   } else {
-    mTrace("table:%s, create msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
+    mTrace("table:%s, create ctable msg is received from thandle:%p", pCreate->tableId, pMsg->rpcMsg.handle);
     return mnodeProcessCreateChildTableMsg(pMsg);
   }
 }
@@ -721,7 +714,7 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) {
     return TSDB_CODE_SUCCESS;
   } else {
     mError("table:%s, failed to drop table, table not exist", pDrop->tableId);
-    return TSDB_CODE_INVALID_TABLE;
+    return TSDB_CODE_INVALID_TABLE_ID;
   }
 }
@@ -749,7 +742,7 @@ static int32_t mnodeProcessTableMetaMsg(SMnodeMsg *pMsg) {
   if (pMsg->pTable == NULL) {
     if (!pInfo->createFlag) {
       mError("table:%s, failed to get table meta, table not exist", pInfo->tableId);
-      return TSDB_CODE_INVALID_TABLE;
+      return TSDB_CODE_INVALID_TABLE_ID;
     } else {
       mTrace("table:%s, failed to get table meta, start auto create table ", pInfo->tableId);
       return mnodeAutoCreateChildTable(pMsg);
@@ -786,7 +779,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) {
   if (pStable->schema == NULL) {
     free(pStable);
     mError("table:%s, failed to create, no schema input", pCreate->tableId);
-    return TSDB_CODE_INVALID_TABLE;
+    return TSDB_CODE_INVALID_TABLE_ID;
   }
   memcpy(pStable->schema, pCreate->schema, numOfCols * sizeof(SSchema));
@@ -1276,9 +1269,9 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
   for (int32_t i = 0; i < numOfTable; ++i) {
     char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
     SSuperTableObj *pTable = mnodeGetSuperTable(stableName);
-    if (pTable->vgHash != NULL) {
+    if (pTable != NULL && pTable->vgHash != NULL) {
       contLen += (taosHashGetSize(pTable->vgHash) * sizeof(SCMVgroupInfo) + sizeof(SVgroupsInfo));
     }
     mnodeDecTableRef(pTable);
   }
@@ -1287,12 +1280,23 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
     return TSDB_CODE_SERV_OUT_OF_MEMORY;
   }

-  pRsp->numOfTables = htonl(numOfTable);
-  char* msg = (char*) pRsp + sizeof(SCMSTableVgroupRspMsg);
+  pRsp->numOfTables = 0;
+  char *msg = (char *)pRsp + sizeof(SCMSTableVgroupRspMsg);

   for (int32_t i = 0; i < numOfTable; ++i) {
     char *stableName = (char*)pInfo + sizeof(SCMSTableVgroupMsg) + (TSDB_TABLE_ID_LEN) * i;
     SSuperTableObj *pTable = mnodeGetSuperTable(stableName);
+    if (pTable == NULL) {
+      mError("stable:%s, not exist while get stable vgroup info", stableName);
+      mnodeDecTableRef(pTable);
+      continue;
+    }
+
+    if (pTable->vgHash == NULL) {
+      mError("stable:%s, not vgroup exist while get stable vgroup info", stableName);
+      mnodeDecTableRef(pTable);
+      continue;
+    }

     SVgroupsInfo *pVgroupInfo = (SVgroupsInfo *)msg;

     SHashMutableIterator *pIter = taosHashCreateIter(pTable->vgHash);
@@ -1318,17 +1322,25 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) {
     }

     taosHashDestroyIter(pIter);
+    mnodeDecTableRef(pTable);

     pVgroupInfo->numOfVgroups = htonl(vgSize);

     // one table is done, try the next table
     msg += sizeof(SVgroupsInfo) + vgSize * sizeof(SCMVgroupInfo);
+    pRsp->numOfTables++;
   }

-  pMsg->rpcRsp.rsp = pRsp;
-  pMsg->rpcRsp.len = msg - (char *)pRsp;
-
-  return TSDB_CODE_SUCCESS;
+  if (pRsp->numOfTables != numOfTable) {
+    rpcFreeCont(pRsp);
+    return TSDB_CODE_INVALID_TABLE_ID;
+  } else {
+    pRsp->numOfTables = htonl(pRsp->numOfTables);
+    pMsg->rpcRsp.rsp = pRsp;
+    pMsg->rpcRsp.len = msg - (char *)pRsp;
+    return TSDB_CODE_SUCCESS;
+  }
 }

 static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg) {
@@ -1429,8 +1441,8 @@ static SChildTableObj* mnodeDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgOb
   SSuperTableObj *pSuperTable = mnodeGetSuperTable(pTagData->name);
   if (pSuperTable == NULL) {
     mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData->name);
-    free(pTable);
-    terrno = TSDB_CODE_INVALID_TABLE;
+    mnodeDestroyChildTable(pTable);
+    terrno = TSDB_CODE_INVALID_TABLE_ID;
     return NULL;
   }
   mnodeDecTableRef(pSuperTable);
@@ -1738,7 +1750,7 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) {

 static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
   SCMTableInfoMsg *pInfo = pMsg->rpcMsg.pCont;
-  STagData* pTag = (STagData*)pInfo->tags;
+  STagData *pTag = (STagData *)pInfo->tags;

   int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + ntohl(pTag->dataLen);
   SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen);
@@ -1754,14 +1766,13 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) {
   pCreateMsg->contLen = htonl(contLen);
   memcpy(pCreateMsg->schema, pInfo->tags, contLen - sizeof(SCMCreateTableMsg));

+  mTrace("table:%s, start to create on demand, stable:%s", pInfo->tableId, ((STagData *)(pCreateMsg->schema))->name);
+
   rpcFreeCont(pMsg->rpcMsg.pCont);
   pMsg->rpcMsg.msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE;
   pMsg->rpcMsg.pCont = pCreateMsg;
   pMsg->rpcMsg.contLen = contLen;

-  mTrace("table:%s, start to create on demand, stable:%s", pInfo->tableId, pInfo->tags);
-
   return TSDB_CODE_ACTION_NEED_REPROCESSED;
 }
@@ -1888,7 +1899,7 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
   SChildTableObj *pTable = mnodeGetTableByPos(pCfg->vgId, pCfg->sid);
   if (pTable == NULL) {
     mError("dnode:%d, vgId:%d sid:%d, table not found", pCfg->dnodeId, pCfg->vgId, pCfg->sid);
-    return TSDB_CODE_NOT_ACTIVE_TABLE;
+    return TSDB_CODE_INVALID_TABLE_ID;
   }

   SMDCreateTableMsg *pCreate = NULL;
@@ -2199,7 +2210,7 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
   if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pAlter->tableId);
   if (pMsg->pTable == NULL) {
     mError("table:%s, failed to alter table, table not exist", pMsg->pTable->tableId);
-    return TSDB_CODE_INVALID_TABLE;
+    return TSDB_CODE_INVALID_TABLE_ID;
   }

   pAlter->type = htons(pAlter->type);
......
@@ -716,14 +716,14 @@ static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) {
   SDnodeObj *pDnode = mnodeGetDnode(pCfg->dnodeId);
   if (pDnode == NULL) {
     mTrace("dnode:%s, invalid dnode", taosIpStr(pCfg->dnodeId), pCfg->vgId);
-    return TSDB_CODE_NOT_ACTIVE_VNODE;
+    return TSDB_CODE_INVALID_VGROUP_ID;
   }
   mnodeDecDnodeRef(pDnode);

   SVgObj *pVgroup = mnodeGetVgroup(pCfg->vgId);
   if (pVgroup == NULL) {
     mTrace("dnode:%s, vgId:%d, no vgroup info", taosIpStr(pCfg->dnodeId), pCfg->vgId);
-    return TSDB_CODE_NOT_ACTIVE_VNODE;
+    return TSDB_CODE_INVALID_VGROUP_ID;
   }
   mnodeDecVgroupRef(pVgroup);
@@ -784,7 +784,7 @@ void mnodeUpdateAllDbVgroups(SDbObj *pAlterDb) {
   mPrint("db:%s, all vgroups is updated in sdb", pAlterDb->name);
 }

-void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
+void mnodeDropAllDbVgroups(SDbObj *pDropDb) {
   void *  pIter = NULL;
   int32_t numOfVgroups = 0;
   SVgObj *pVgroup = NULL;
@@ -802,10 +802,6 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
       };
       sdbDeleteRow(&oper);
       numOfVgroups++;
-
-      if (sendMsg) {
-        mnodeSendDropVgroupMsg(pVgroup, NULL);
-      }
     }

     mnodeDecVgroupRef(pVgroup);
@@ -815,3 +811,25 @@ void mnodeDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {

   mPrint("db:%s, all vgroups:%d is dropped from sdb", pDropDb->name, numOfVgroups);
 }
+
+void mnodeSendDropAllDbVgroupsMsg(SDbObj *pDropDb) {
+  void *  pIter = NULL;
+  int32_t numOfVgroups = 0;
+  SVgObj *pVgroup = NULL;
+
+  mPrint("db:%s, all vgroups will be dropped in dnode", pDropDb->name);
+
+  while (1) {
+    pIter = mnodeGetNextVgroup(pIter, &pVgroup);
+    if (pVgroup == NULL) break;
+
+    if (pVgroup->pDb == pDropDb) {
+      mnodeSendDropVgroupMsg(pVgroup, NULL);
+    }
+
+    mnodeDecVgroupRef(pVgroup);
+  }
+
+  sdbFreeIter(pIter);
+
+  mPrint("db:%s, all vgroups:%d drop msg is sent to dnode", pDropDb->name, numOfVgroups);
+}
\ No newline at end of file
@@ -445,10 +445,10 @@ void httpJsonPairStatus(JsonBuf* buf, int code) {
       httpJsonItemToken(buf);
       if (code == TSDB_CODE_DB_NOT_SELECTED) {
         httpJsonPair(buf, "desc", 4, "failed to create database", 23);
-      } else if (code == TSDB_CODE_INVALID_TABLE) {
+      } else if (code == TSDB_CODE_INVALID_TABLE_ID) {
         httpJsonPair(buf, "desc", 4, "failed to create table", 22);
       } else
         httpJsonPair(buf, "desc", 4, (char*)tstrerror(code), (int)strlen(tstrerror(code)));
     }
   }
 }
\ No newline at end of file
@@ -111,7 +111,7 @@ bool tgCheckFinished(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
               pContext->ipstr);
       return false;
     }
-  } else if (code == TSDB_CODE_INVALID_TABLE) {
+  } else if (code == TSDB_CODE_INVALID_TABLE_ID) {
     cmd->cmdState = HTTP_CMD_STATE_RUN_FINISHED;
     if (multiCmds->cmds[multiCmds->pos - 1].cmdState == HTTP_CMD_STATE_NOT_RUN_YET) {
       multiCmds->pos = (int16_t)(multiCmds->pos - 2);
@@ -151,4 +151,4 @@ void tgSetNextCmd(struct HttpContext *pContext, HttpSqlCmd *cmd, int code) {
   } else {
     multiCmds->pos++;
   }
 }
\ No newline at end of file
@@ -420,13 +420,13 @@ void rpcSendResponse(const SRpcMsg *pRsp) {
   pConn->rspMsgLen = msgLen;
   if (pMsg->code == TSDB_CODE_ACTION_IN_PROGRESS) pConn->inTranId--;

-  rpcUnlockConn(pConn);
-
   taosTmrStopA(&pConn->pTimer);
   // taosTmrReset(rpcProcessIdleTimer, pRpc->idleTime, pConn, pRpc->tmrCtrl, &pConn->pIdleTimer);
   rpcSendMsgToPeer(pConn, msg, msgLen);
   pConn->secured = 1; // connection shall be secured

+  rpcUnlockConn(pConn);
+
   return;
 }
@@ -1095,10 +1095,10 @@ static void rpcSendReqToServer(SRpcInfo *pRpc, SRpcReqContext *pContext) {
   pConn->reqMsgLen = msgLen;
   pConn->pContext = pContext;

-  rpcUnlockConn(pConn);
-
   taosTmrReset(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl, &pConn->pTimer);
   rpcSendMsgToPeer(pConn, msg, msgLen);
+
+  rpcUnlockConn(pConn);
 }

 static void rpcSendMsgToPeer(SRpcConn *pConn, void *msg, int msgLen) {
......
@@ -43,7 +43,7 @@ int32_t vnodeProcessRead(void *param, int msgType, void *pCont, int32_t contLen,
     return TSDB_CODE_MSG_NOT_PROCESSED;

   if (pVnode->status == TAOS_VN_STATUS_DELETING || pVnode->status == TAOS_VN_STATUS_CLOSING)
-    return TSDB_CODE_NOT_ACTIVE_VNODE;
+    return TSDB_CODE_INVALID_VGROUP_ID;

   return (*vnodeProcessReadMsgFp[msgType])(pVnode, pCont, contLen, ret);
 }
......
@@ -53,7 +53,7 @@ int32_t vnodeProcessWrite(void *param1, int qtype, void *param2, void *item) {

   if (pHead->version == 0) { // from client or CQ
     if (pVnode->status != TAOS_VN_STATUS_READY)
-      return TSDB_CODE_NOT_ACTIVE_VNODE;
+      return TSDB_CODE_INVALID_VGROUP_ID;  // it may be in deleting or closing state

     if (pVnode->syncCfg.replica > 1 && pVnode->role != TAOS_SYNC_ROLE_MASTER)
       return TSDB_CODE_NOT_READY;
......
#!/usr/bin/gnuplot
# Usage: gnuplot -e "filename='<report-base>'" -p perftest-csv2png.gnuplot
# Reads <report-base>.csv and writes <report-base>.png (see the driver scripts below).
reset
set terminal png
set title "Performance Test Report" font ",20"
set ylabel "Time in Seconds"
set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"
set style data linespoints
set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output filename . '.png'
set datafile separator ','
set key reverse Left outside
set grid
# plot 'perftest-influx-report.csv' using 1:2 title "InfluxDB Write", \
# "" using 1:3 title "InfluxDB Query case1", \
# "" using 1:4 title "InfluxDB Query case2", \
# "" using 1:5 title "InfluxDB Query case3", \
# "" using 1:6 title "InfluxDB Query case4"
#
plot filename . '.csv' using 1:2 title "TDengine Write", \
"" using 1:3 title "TDengine Query case1", \
"" using 1:4 title "TDengine Query case2", \
"" using 1:5 title "TDengine Query case3", \
"" using 1:6 title "TDengine Query case4"
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function set-Wal {
echo "/etc/taos/taos.cfg walLevel will be set to $1"
sed -i 's/^walLevel.*$/walLevel '"$1"'/g' /etc/taos/taos.cfg
}
function collectSysInfo {
rm sysinfo.log
grep model /proc/cpuinfo | tail -n1 | tee sysinfo.log
grep cores /proc/cpuinfo | tail -n1 | tee -a sysinfo.log
grep MemTotal /proc/meminfo | tee -a sysinfo.log
grep "^[^#;]" /etc/taos/taos.cfg | tee taos.cfg
}
function buildTDengine {
cd /root/TDengine
git pull
cd debug
rm -rf *
cmake ..
make > /dev/null
make install
}
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function sendReport {
receiver="sdsang@taosdata.com, sangshuduo@gmail.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
echo -e "to: ${receiver}\nsubject: Perf test report ${today}\n" | \
(cat - && uuencode perftest-1d-$today.log perftest-1d-$today.log)| \
(cat - && uuencode perftest-1d-report.csv perftest-1d-report-$today.csv) | \
(cat - && uuencode perftest-1d-report.png perftest-1d-report-$today.png) | \
(cat - && uuencode perftest-13d-$today.log perftest-13d-$today.log)| \
(cat - && uuencode perftest-13d-report.csv perftest-13d-report-$today.csv) | \
(cat - && uuencode perftest-13d-report.png perftest-13d-report-$today.png) | \
(cat - && uuencode taosdemo-$today.log taosdemo-$today.log) | \
(cat - && uuencode taosdemo-report.csv taosdemo-report-$today.csv) | \
(cat - && uuencode taosdemo-report.png taosdemo-report-$today.png) | \
(cat - && uuencode sysinfo.log sysinfo.txt) | \
(cat - && uuencode taos.cfg taos-cfg-$today.txt) | \
ssmtp "${receiver}"
}
today=`date +"%Y%m%d"`
cd /root
echo -e "cron-ran-at-${today}" >> cron.log
echoInfo "Build TDengine"
buildTDengine
set-Wal "2"
cd /root
./perftest-tsdb-compare-1d.sh
cd /root
./perftest-tsdb-compare-13d.sh
cd /root
./perftest-taosdemo.sh
collectSysInfo
echoInfo "Send Report"
sendReport
echoInfo "End of Test"
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function runCreateTableOnly {
echoInfo "Restart Taosd"
restartTaosd
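# table creation only: run taosdemo with -n 0 so no data rows are inserted; /usr/bin/time records the elapsed seconds ("Total: %e") in totaltime.out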
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -n 0 2>&1 | tee taosdemo-$today.log"
demoTableOnly=`grep "Total:" totaltime.out|awk '{print $2}'`
}
function runCreateTableThenInsert {
echoInfo "Restart Taosd"
restartTaosd
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo 2>&1 | tee -a taosdemo-$today.log"
demoTableAndInsert=`grep "Total:" totaltime.out|awk '{print $2}'`
demoRPS=`grep "records\/second" taosdemo-$today.log | tail -n1 | awk '{print $13}'`
}
function generateTaosdemoPlot {
echo "${today}, demoTableOnly: ${demoTableOnly}, demoTableAndInsert: ${demoTableAndInsert}" | tee -a taosdemo-$today.log
echo "${today}, ${demoTableOnly}, ${demoTableAndInsert}, ${demoRPS}" >> taosdemo-report.csv
csvLines=`cat taosdemo-report.csv | wc -l`
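# keep roughly the 10 most recent runs in the CSV before plotting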
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' taosdemo-report.csv
fi
gnuplot -p taosdemo-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
echoInfo "Test Create Table Only "
runCreateTableOnly
echoInfo "Test Create Table then Insert data"
runCreateTableThenInsert
echoInfo "Generate plot for taosdemo"
generateTaosdemoPlot
echoInfo "End of TaosDemo Test"
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd > /dev/null 2>&1 &
sleep 10
}
function runPerfTest13d {
echoInfo "Restart Taosd"
restartTaosd
cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-13d-csv.sh 2>&1 | tee /root/perftest-13d-$today.log
}
function generatePerfPlot13d {
cd /root
csvLines=`cat perftest-13d-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-13d-report.csv
fi
gnuplot -e "filename='perftest-13d-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
echoInfo "run Performance Test with 13 days data"
runPerfTest13d
echoInfo "Generate plot of 13 days data"
generatePerfPlot13d
echoInfo "End of TSDB-Compare 13-days-data Test"
#!/bin/bash
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd > /dev/null 2>&1 &
sleep 10
}
function runPerfTest1d {
echoInfo "Restart Taosd"
restartTaosd
cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-1d-csv.sh 2>&1 | tee /root/perftest-1d-$today.log
}
function generatePerfPlot1d {
cd /root
csvLines=`cat perftest-1d-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-1d-report.csv
fi
gnuplot -e "filename='perftest-1d-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
echoInfo "run Performance Test with 1 day data"
runPerfTest1d
echoInfo "Generate plot of 1 day data"
generatePerfPlot1d
echoInfo "End of TSDB-Compare 1-day-data Test"
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
docker rm -f `docker ps -a -q`
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo "Prepare data for InfluxDB...."
#bin/bulk_data_gen -seed 123 -format influx-bulk -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" >data/influx.dat
bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval 1s -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" >data/influx.dat
echo
echo "Prepare data for TDengine...."
#bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval 1s -tdschema-file config/TDengineSchema.toml -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 tsdbcomp >>/dev/null 2>&1
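# run TDengine 1.6.4.5 in a container at a fixed address (172.15.1.6) on the tsdbcomp network, with its service ports exposed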
TDENGINE=`docker run -d --net tsdbcomp --ip 172.15.1.6 -p 6030:6030 -p 6020:6020 -p 6031:6031 -p 6032:6032 -p 6033:6033 -p 6034:6034 -p 6035:6035 -p 6036:6036 -p 6037:6037 -p 6038:6038 -p 6039:6039 tdengine/tdengine:1.6.4.5`
echo
echo "------------------Writing Data-----------------"
echo
sleep 5
echo
echo -e "Start test TDengine, result in ${GREEN}Green line${NC}"
TDENGINERES=`cat data/tdengine.dat |bin/bulk_load_tdengine --url 172.15.1.6:0 --batch-size 300 -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
#TDENGINERES=`cat data/tdengine.dat |gunzip|bin/bulk_load_tdengine --url 172.15.1.6:0 --batch-size 300 -do-load -report-tags n1 -workers 10 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
INFLUX=`docker run -d -p 8086:8086 --net tsdbcomp --ip 172.15.1.5 influxdb` >>/dev/null 2>&1
sleep 10
echo
echo -e "Start test InfluxDB, result in ${GREEN}Green line${NC}"
INFLUXRES=`cat data/influx.dat |bin/bulk_load_influx --batch-size=5000 --workers=20 --urls="http://172.15.1.5:8086" | grep loaded`
echo
echo -e "${GREEN}InfluxDB writing result:${NC}"
echo -e "${GREEN}$INFLUXRES${NC}"
TMP=`echo $INFLUXRES|awk '{print($5)}'`
IFWTM=`echo ${TMP%s*}`
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
# Test case 1: over the full dataset, match 8 hostname tags and return the maximum of the usage_user metric from those 8 hosts' simulated server CPU data.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
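# generate 1000 queries of this type with bulk_query_gen and replay them against TDengine with 50 workers; keep only the "wall clock" summary line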
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
#Test case 2
# Test case 2: over the full dataset, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-hour granularity (max per hour).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 10-minute granularity (max per 10 minutes).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-minute granularity (max per minute).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(1m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 1 hour
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://172.15.1.6:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1hour, Influxdb"
echo
#Test case 1
# Test case 1: over the full dataset, match 8 hostname tags and return the maximum of the usage_user metric from those 8 hosts' simulated server CPU data.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
IFQS1=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 1 result:${NC}"
echo -e "${GREEN}$IFQS1${NC}"
TMP=`echo $IFQS1|awk '{print($4)}'`
IFQ1=`echo ${TMP%s*}`
#Test case 2
# Test case 2: over the full dataset, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-hour granularity (max per hour).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
IFQS2=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 2 result:${NC}"
echo -e "${GREEN}$IFQS2${NC}"
TMP=`echo $IFQS2|awk '{print($4)}'`
IFQ2=`echo ${TMP%s*}`
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 10-minute granularity (max per 10 minutes).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS3=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 3 result:${NC}"
echo -e "${GREEN}$IFQS3${NC}"
TMP=`echo $IFQS3|awk '{print($4)}'`
IFQ3=`echo ${TMP%s*}`
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-minute granularity (max per minute).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(1m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 1 hour
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS4=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 4 result:${NC}"
echo -e "${GREEN}$IFQS4${NC}"
TMP=`echo $IFQS4|awk '{print($4)}'`
IFQ4=`echo ${TMP%s*}`
echo
echo
echo "======================================================"
echo " tsdb performance comparision "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFWTM
printf " TDengine | %-4.2f Seconds \n" $TDWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ1
printf " TDengine | %-4.2f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ2
printf " TDengine | %-4.2f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ3
printf " TDengine | %-4.2f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ4
printf " TDengine | %-4.2f Seconds \n" $TDQ4
echo "------------------------------------------------------"
echo
docker stop $INFLUX >>/dev/null 2>&1
docker container rm -f $INFLUX >>/dev/null 2>&1
docker stop $TDENGINE >>/dev/null 2>&1
docker container rm -f $TDENGINE >>/dev/null 2>&1
docker network rm tsdbcomp >>/dev/null 2>&1
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
today=`date +"%Y%m%d"`
echo "${today}, ${IFWTM}, ${IFQ1}, ${IFQ2}, ${IFQ3}, ${IFQ4}" >> /root/perftest-influx-report.csv
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
docker rm -f `docker ps -a -q`
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo "Prepare data for InfluxDB...."
#bin/bulk_data_gen -seed 123 -format influx-bulk -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" >data/influx.dat
bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval 1s -scale-var 10 -use-case devops -timestamp-start "2018-01-02T00:00:00Z" -timestamp-end "2018-01-15T00:00:00Z" > /mnt/data/influx.dat
docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 tsdbcomp >>/dev/null 2>&1
INFLUX=`docker run -d -p 8086:8086 --net tsdbcomp --ip 172.15.1.5 influxdb` >>/dev/null 2>&1
sleep 10
echo
echo -e "Start test InfluxDB, result in ${GREEN}Green line${NC}"
INFLUXRES=`cat /mnt/data/influx.dat |bin/bulk_load_influx --batch-size=5000 --workers=20 --urls="http://172.15.1.5:8086" | grep loaded`
echo
echo -e "${GREEN}InfluxDB writing result:${NC}"
echo -e "${GREEN}$INFLUXRES${NC}"
DATA=`echo $INFLUXRES|awk '{print($2)}'`
TMP=`echo $INFLUXRES|awk '{print($5)}'`
IFWTM=`echo ${TMP%s*}`
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1hour, Influxdb"
echo
#Test case 1
# Test case 1: over the full dataset, match 8 hostname tags and return the maximum of the usage_user metric from those 8 hosts' simulated server CPU data.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
IFQS1=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 1 result:${NC}"
echo -e "${GREEN}$IFQS1${NC}"
TMP=`echo $IFQS1|awk '{print($4)}'`
IFQ1=`echo ${TMP%s*}`
#Test case 2
# Test case 2: over the full dataset, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-hour granularity (max per hour).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
IFQS2=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 2 result:${NC}"
echo -e "${GREEN}$IFQS2${NC}"
TMP=`echo $IFQS2|awk '{print($4)}'`
IFQ2=`echo ${TMP%s*}`
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 10-minute granularity (max per 10 minutes).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS3=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 3 result:${NC}"
echo -e "${GREEN}$IFQS3${NC}"
TMP=`echo $IFQS3|awk '{print($4)}'`
IFQ3=`echo ${TMP%s*}`
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-minute granularity (max per minute).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(1m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 1 hour
#INFLUXQUERY=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
IFQS4=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 4 result:${NC}"
echo -e "${GREEN}$IFQS4${NC}"
TMP=`echo $IFQS4|awk '{print($4)}'`
IFQ4=`echo ${TMP%s*}`
echo
echo
echo "======================================================"
echo " tsdb performance comparision "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " InfluxDB | %-4.2f Seconds \n" $IFQ4
echo "------------------------------------------------------"
echo
today=`date +"%Y%m%d"`
echo "${today}, ${IFWTM}, ${IFQ1}, ${IFQ2}, ${IFQ3}, ${IFQ4}" >> /root/perftest-influxdb-report-13d.csv
docker stop $INFLUX >>/dev/null 2>&1
docker container rm -f $INFLUX >>/dev/null 2>&1
docker network rm tsdbcomp >>/dev/null 2>&1
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo
echo "Prepare data for TDengine...."
#bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval 1s -tdschema-file config/TDengineSchema.toml -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-14T00:00:00Z" > /mnt/data/tdengine.dat
echo
echo -e "Start test TDengine, result in ${GREEN}Green line${NC}"
for i in {1..5}; do
TDENGINERES=`cat /mnt/data/tdengine.dat |bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 5000 -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
#TDENGINERES=`cat data/tdengine.dat |gunzip|bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 300 -do-load -report-tags n1 -workers 10 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
[ -z "$TDWTM" ] || break
done
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
# Test case 1: over the full dataset, match 8 hostname tags and return the maximum of the usage_user metric from those 8 hosts' simulated server CPU data.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
for i in {1..5}; do
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
[ -z "$TDQ1" ] || break
done
#Test case 2
# Test case 2: over the full dataset, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-hour granularity (max per hour).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
for i in {1..5}; do
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
[ -z "$TDQ2" ] || break
done
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 10-minute granularity (max per 10 minutes).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
for i in {1..5}; do
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
[ -z "$TDQ3" ] || break
done
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-minute granularity (max per minute).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(1m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 1 hour
for i in {1..5}; do
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
[ -z "$TDQ4" ] || break
done
sleep 10
echo
echo
echo "======================================================"
echo " tsdb performance comparision "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " TDengine | %-4.2f Seconds \n" $TDWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ4
echo "------------------------------------------------------"
echo
today=`date +"%Y%m%d"`
echo "${today}, ${TDWTM}, ${TDQ1}, ${TDQ2}, ${TDQ3}, ${TDQ4}" >> /root/perftest-13d-report.csv
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
#!/bin/bash
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
#set -x
echo
echo "---------------Generating Data-----------------"
echo
echo
echo "Prepare data for TDengine...."
#bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval 1s -tdschema-file config/TDengineSchema.toml -scale-var 10 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > data/tdengine.dat
echo
echo -e "Start test TDengine, result in ${GREEN}Green line${NC}"
for i in {1..5}; do
TDENGINERES=`cat data/tdengine.dat |bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 300 -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
#TDENGINERES=`cat data/tdengine.dat |gunzip|bin/bulk_load_tdengine --url 127.0.0.1:0 --batch-size 300 -do-load -report-tags n1 -workers 10 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
[ -z "$TDWTM" ] || break
done
echo
echo "------------------Querying Data-----------------"
echo
sleep 10
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
# Test case 1: over the full dataset, match 8 hostname tags and return the maximum of the usage_user metric from those 8 hosts' simulated server CPU data.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
for i in {1..5}; do
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
[ -z "$TDQ1" ] || break
done
#Test case 2
# Test case 2: over the full dataset, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-hour granularity (max per hour).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
for i in {1..5}; do
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
[ -z "$TDQ2" ] || break
done
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 10-minute granularity (max per 10 minutes).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
for i in {1..5}; do
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
[ -z "$TDQ3" ] || break
done
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user from those 8 hosts' simulated server CPU data at a 1-minute granularity (max per minute).
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(1m);
# a,b,c,d,e,f,g,h are 8 random numbers, y-x = 1 hour
for i in {1..5}; do
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr -scale-var 10 -queries 1000 | bin/query_benchmarker_tdengine -urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
[ -z "$TDQ4" ] || break
done
sleep 10
echo
echo
echo "======================================================"
echo " tsdb performance comparision "
echo "======================================================"
echo -e " Writing $DATA records test takes: "
printf " TDengine | %-4.2f Seconds \n" $TDWTM
echo "------------------------------------------------------"
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " TDengine | %-4.2f Seconds \n" $TDQ4
echo "------------------------------------------------------"
echo
today=`date +"%Y%m%d"`
echo "${today}, ${TDWTM}, ${TDQ1}, ${TDQ2}, ${TDQ3}, ${TDQ4}" >> /root/perftest-1d-report.csv
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
#!/usr/bin/gnuplot
reset
set terminal png
set title "TaosDemo Performance Report" font ",20"
set ylabel "Time in Seconds"
set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"
set style data linespoints
set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output 'taosdemo-report.png'
set datafile separator ','
set key reverse Left outside
set grid
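# taosdemo-report.csv columns: 1 date, 2 create-tables time (s), 3 create-and-insert time (s), 4 insert throughput (records/second)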
plot 'taosdemo-report.csv' using 1:2 title "Create 10,000 Tables", \
"" using 1:3 title "Create 10,000 Tables and Insert 100,000 Records", \
"" using 1:4 title "Requests Per Second of Inserting 100,000 Records"
This diff is collapsed.
...@@ -49,7 +49,8 @@ class TDTestCase:
tdSql.error("select * from db.st where ts > '2020-05-13 10:00:00.002' OR tagtype < 2")
# illegal condition
tdSql.error("select * from db.st where ts != '2020-05-13 10:00:00.002' OR tagtype < 2")
tdSql.error("select * from db.st where tagtype <> 1 OR tagtype < 2")
def stop(self):
tdSql.close()
...
...@@ -41,13 +41,12 @@ class TDTestCase:
('2020-05-13 10:00:00.002', 3, 'third') dev_002 VALUES('2020-05-13 10:00:00.003', 1, 'first'), ('2020-05-13 10:00:00.004', 2, 'second'),
('2020-05-13 10:00:00.005', 3, 'third')""")
"""Error expected here, but no errors
# query first .. as ..
tdSql.error("select first(*) as one from st")
# query last .. as ..
tdSql.error("select last(*) as latest from st")
"""
# query last row .. as ..
tdSql.error("select last_row as latest from st")
...
...@@ -31,14 +31,22 @@ class TDTestCase:
tdSql.execute("create table stb1 (ts timestamp, c1 int, c2 float) tags(t1 int, t2 binary(10), t3 nchar(10))")
tdSql.execute("insert into tb1 using stb1 tags(1,'tb1', '表1') values ('2020-04-18 15:00:00.000', 1, 0.1), ('2020-04-18 15:00:01.000', 2, 0.1)")
tdSql.execute("insert into tb2 using stb1 tags(2,'tb2', '表2') values ('2020-04-18 15:00:02.000', 3, 2.1), ('2020-04-18 15:00:03.000', 4, 2.2)")
# join 2 tables -- bug exists
# inner join --- bug
# tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
tdSql.query("select * from tb1 a, tb2 b where a.ts = b.ts")
# tdSql.checkRows(1)
tdSql.checkRows(1)
# join 3 tables -- bug exists
# tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
# query show stable
tdSql.query("show stables")
tdSql.checkRows(1)
# query show tables
tdSql.query("show table")
tdSql.checkRows(2)
# query count
tdSql.query("select count(*) from stb1")
tdSql.checkData(0, 0, 4)
...@@ -51,6 +59,10 @@ class TDTestCase:
tdSql.query("select last(*) from stb1")
tdSql.checkData(0, 1, 4)
# query last_row
tdSql.query("select last_row(*) from stb1")
tdSql.checkData(0, 1, 4)
# query as
tdSql.query("select t2 as number from stb1")
tdSql.checkRows(2)
...@@ -63,6 +75,10 @@ class TDTestCase:
tdSql.query("select last(*) as end from stb1")
tdSql.checkData(0, 1, 4)
# query last_row ... as
tdSql.query("select last_row(*) as end from stb1")
tdSql.checkData(0, 1, 4)
# query group .. by
tdSql.query("select sum(c1), t2 from stb1 group by t2")
tdSql.checkRows(2)
...@@ -75,6 +91,34 @@ class TDTestCase:
tdSql.query("select * from stb1 limit 2 offset 3")
tdSql.checkRows(1)
# query ... alias for table ---- bug
tdSql.query("select t.ts from tb1 t")
tdSql.checkRows(2)
# query ... tbname
tdSql.query("select tbname from stb1")
tdSql.checkRows(2)
# query ... tbname count ---- bug
tdSql.query("select count(tbname) from stb1")
tdSql.checkRows(2)
# query ... select database ---- bug
tdSql.query("SELECT database()")
tdSql.checkRows(1)
# query ... select client_version ---- bug
tdSql.query("SELECT client_version()")
tdSql.checkRows(1)
# query ... select server_version ---- bug
tdSql.query("SELECT server_version()")
tdSql.checkRows(1)
# query ... select server_status ---- bug
tdSql.query("SELECT server_status()")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
...