Commit 92a89f75 authored by Steven Li

Merge remote-tracking branch 'origin/develop' into feature/crash_gen

......@@ -891,11 +891,15 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscInvalidSQLErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z);
}
SKVRowBuilder kvRowBuilder = {0};
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
uint32_t ignoreTokenTypes = TK_LP;
uint32_t numOfIgnoreToken = 1;
for (int i = 0; i < spd.numOfAssignedCols; ++i) {
char * tagVal = pTag->data + spd.elems[i].offset;
int16_t colIndex = spd.elems[i].colIndex;
SSchema* pSchema = pTagSchema + spd.elems[i].colIndex;
index = 0;
sToken = tStrGetToken(sql, &index, true, numOfIgnoreToken, &ignoreTokenTypes);
......@@ -911,12 +915,26 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
sToken.n -= 2;
}
code = tsParseOneColumnData(&pTagSchema[colIndex], &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
char tagVal[TSDB_MAX_TAGS_LEN];
code = tsParseOneColumnData(pSchema, &sToken, tagVal, pCmd->payload, &sql, false, tinfo.precision);
if (code != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return code;
}
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
}
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
tdDestroyKVRowBuilder(&kvRowBuilder);
if (row == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
tdSortKVRowByColIdx(row);
pTag->dataLen = kvRowLen(row);
kvRowCpy(pTag->data, row);
free(row);
index = 0;
sToken = tStrGetToken(sql, &index, false, 0, NULL);
sql += index;
......@@ -924,29 +942,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscInvalidSQLErrMsg(pCmd->payload, ") expected", sToken.z);
}
// 2. set the null value for the columns that do not assign values
if (spd.numOfAssignedCols < spd.numOfCols) {
char *ptr = pTag->data;
for (int32_t i = 0; i < spd.numOfCols; ++i) {
if (!spd.hasVal[i]) { // current tag column do not have any value to insert, set it to null
if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
setVardataNull(ptr, pTagSchema[i].type);
} else {
setNull(ptr, pTagSchema[i].type, pTagSchema[i].bytes);
}
}
ptr += pTagSchema[i].bytes;
}
}
// 3. calculate the actual data size of STagData
pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen);
for (int32_t t = 0; t < numOfTags; ++t) {
pTag->dataLen += pTagSchema[t].bytes;
pCmd->payloadLen += pTagSchema[t].bytes;
}
pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen) + pTag->dataLen;
pTag->dataLen = htonl(pTag->dataLen);
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
......
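The two hunks above switch tag encoding from fixed offsets to a KV row. A minimal sketch of the builder lifecycle, using only the tdataformat API that appears in this diff (names such as numOfTags stand in for the locals at each call site, and error handling is trimmed):

    SKVRowBuilder kvRowBuilder = {0};
    if (tdInitKVRowBuilder(&kvRowBuilder) < 0) return TSDB_CODE_TSC_OUT_OF_MEMORY;
    for (int i = 0; i < numOfTags; ++i) {
      // tagVal holds the encoded value, e.g. produced by tsParseOneColumnData()
      tdAddColToKVRow(&kvRowBuilder, pTagSchema[i].colId, pTagSchema[i].type, tagVal);
    }
    SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);  // heap-allocated copy
    tdDestroyKVRowBuilder(&kvRowBuilder);
    if (row == NULL) return TSDB_CODE_TSC_OUT_OF_MEMORY;
    tdSortKVRowByColIdx(row);          // sort the column index so lookups can binary-search
    pTag->dataLen = kvRowLen(row);     // serialized length, header included
    kvRowCpy(pTag->data, row);         // copy into the STagData payload
    free(row);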
......@@ -5623,24 +5623,41 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
SSchema* pTagSchema = tscGetTableTagSchema(pStableMeterMetaInfo->pTableMeta);
STagData* pTag = &pCreateTable->usingInfo.tagdata;
char* tagVal = pTag->data;
SKVRowBuilder kvRowBuilder = {0};
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int32_t ret = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pList->nExpr; ++i) {
if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
SSchema* pSchema = pTagSchema + i;
if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) {
// validate the length of binary
if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pTagSchema[i].bytes) {
if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pSchema->bytes) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
}
ret = tVariantDump(&(pList->a[i].pVar), tagVal, pTagSchema[i].type, true);
char tagVal[TSDB_MAX_TAGS_LEN];
ret = tVariantDump(&(pList->a[i].pVar), tagVal, pSchema->type, true);
if (ret != TSDB_CODE_SUCCESS) {
tdDestroyKVRowBuilder(&kvRowBuilder);
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
tagVal += pTagSchema[i].bytes;
tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal);
}
SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder);
tdDestroyKVRowBuilder(&kvRowBuilder);
if (row == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
tdSortKVRowByColIdx(row);
pTag->dataLen = kvRowLen(row);
kvRowCpy(pTag->data, row);
free(row);
// table name
if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) {
......@@ -5653,7 +5670,6 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return ret;
}
pTag->dataLen = tagVal - pTag->data;
return TSDB_CODE_SUCCESS;
}
......
......@@ -191,7 +191,8 @@ int tscSendMsgToServer(SSqlObj *pSql) {
.msgType = pSql->cmd.msgType,
.pCont = pMsg,
.contLen = pSql->cmd.payloadLen,
.handle = pSql,
.ahandle = pSql,
.handle = &pSql->pRpcCtx,
.code = 0
};
......@@ -199,12 +200,12 @@ int tscSendMsgToServer(SSqlObj *pSql) {
// Otherwise, the pSql object may already have been released inside the response callback
// (tscProcessMsgFromServer). Assigning the rpc context to the sql object after that point
// would certainly crash.
/*pSql->pRpcCtx = */rpcSendRequest(pObj->pDnodeConn, &pSql->ipList, &rpcMsg);
rpcSendRequest(pObj->pDnodeConn, &pSql->ipList, &rpcMsg);
return TSDB_CODE_SUCCESS;
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcIpSet *pIpSet) {
SSqlObj *pSql = (SSqlObj *)rpcMsg->handle;
SSqlObj *pSql = (SSqlObj *)rpcMsg->ahandle;
if (pSql == NULL || pSql->signature != pSql) {
tscError("%p sql is already released", pSql);
return;
......
......@@ -625,18 +625,31 @@ static int trimDataBlock(void* pDataBlock, STableDataBlocks* pTableDataBlock, bo
return len;
}
static int32_t getRowExpandSize(STableMeta* pTableMeta) {
int32_t result = TD_DATA_ROW_HEAD_SIZE;
int32_t columns = tscGetNumOfColumns(pTableMeta);
SSchema* pSchema = tscGetTableSchema(pTableMeta);
for(int32_t i = 0; i < columns; i++) {
if (IS_VAR_DATA_TYPE((pSchema + i)->type)) {
result += TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
}
}
return result;
}
int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
SSqlCmd* pCmd = &pSql->cmd;
// the maximum expanded size in bytes when row-wise data is converted to SDataRow format
const int32_t MAX_EXPAND_SIZE = TD_DATA_ROW_HEAD_SIZE + TYPE_BYTES[TSDB_DATA_TYPE_BINARY];
STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, 0);
int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta);
void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES);
size_t total = taosArrayGetSize(pTableDataBlockList);
for (int32_t i = 0; i < total; ++i) {
STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, i);
pOneTableBlock = taosArrayGetP(pTableDataBlockList, i);
STableDataBlocks* dataBuf = NULL;
int32_t ret =
......@@ -650,7 +663,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
}
SSubmitBlk* pBlocks = (SSubmitBlk*) pOneTableBlock->pData;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * MAX_EXPAND_SIZE;
int64_t destSize = dataBuf->size + pOneTableBlock->size + pBlocks->numOfRows * expandSize;
if (dataBuf->nAllocSize < destSize) {
while (dataBuf->nAllocSize < destSize) {
......@@ -678,8 +691,7 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
tscDebug("%p tableId:%s, sid:%d rows:%d sversion:%d skey:%" PRId64 ", ekey:%" PRId64, pSql, pOneTableBlock->tableId,
pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, GET_INT64_VAL(pBlocks->data), GET_INT64_VAL(ekey));
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + MAX_EXPAND_SIZE);
int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize);
pBlocks->tid = htonl(pBlocks->tid);
pBlocks->uid = htobe64(pBlocks->uid);
......
......@@ -272,7 +272,7 @@ typedef struct {
int16_t offset;
} SColIdx;
#define TD_KV_ROW_HEAD_SIZE 2 * sizeof(int16_t)
#define TD_KV_ROW_HEAD_SIZE (2 * sizeof(int16_t))
#define kvRowLen(r) (*(int16_t *)(r))
#define kvRowNCols(r) (*(int16_t *)POINTER_SHIFT(r, sizeof(int16_t)))
......@@ -290,6 +290,7 @@ SKVRow tdKVRowDup(SKVRow row);
int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value);
int tdEncodeKVRow(void **buf, SKVRow row);
void * tdDecodeKVRow(void *buf, SKVRow *row);
void tdSortKVRowByColIdx(SKVRow row);
static FORCE_INLINE int comparTagId(const void *key1, const void *key2) {
if (*(int16_t *)key1 > ((SColIdx *)key2)->colId) {
......
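Parenthesizing TD_KV_ROW_HEAD_SIZE matters as soon as the macro sits inside a larger expression; a sketch of the failure mode the new definition avoids:

    // old: #define TD_KV_ROW_HEAD_SIZE 2 * sizeof(int16_t)
    size_t n = bodyLen / TD_KV_ROW_HEAD_SIZE;
    // expands to bodyLen / 2 * sizeof(int16_t): divide by 2, then
    // multiply by sizeof(int16_t) -- off by a factor of four.
    // new: (2 * sizeof(int16_t)) makes the division bind as intended.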
......@@ -117,6 +117,7 @@ extern char tsDataDir[];
extern char tsLogDir[];
extern char tsScriptDir[];
extern int64_t tsMsPerDay[3];
extern char tsVnodeBakDir[];
// system info
extern char tsOsName[];
......
......@@ -515,6 +515,22 @@ SKVRow tdKVRowDup(SKVRow row) {
return trow;
}
static int compareColIdx(const void* a, const void* b) {
const SColIdx* x = (const SColIdx*)a;
const SColIdx* y = (const SColIdx*)b;
if (x->colId > y->colId) {
return 1;
}
if (x->colId < y->colId) {
return -1;
}
return 0;
}
void tdSortKVRowByColIdx(SKVRow row) {
qsort(kvRowColIdx(row), kvRowNCols(row), sizeof(SColIdx), compareColIdx);
}
int tdSetKVRowDataOfCol(SKVRow *orow, int16_t colId, int8_t type, void *value) {
SColIdx *pColIdx = NULL;
SKVRow row = *orow;
......
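Sorting the SColIdx array by colId is what makes per-column lookups logarithmic. A hypothetical lookup built on the comparTagId comparator already declared in tdataformat.h (the real tdGetKVRowValOfCol presumably resolves the returned entry's offset into the row's value area):

    static SColIdx *kvRowFindColIdx(SKVRow row, int16_t colId) {
      // requires the index to be sorted, i.e. tdSortKVRowByColIdx() has run
      return (SColIdx *)bsearch(&colId, kvRowColIdx(row), kvRowNCols(row),
                                sizeof(SColIdx), comparTagId);
    }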
......@@ -153,6 +153,7 @@ char tsDnodeDir[TSDB_FILENAME_LEN] = {0};
char tsMnodeDir[TSDB_FILENAME_LEN] = {0};
char tsDataDir[TSDB_FILENAME_LEN] = "/var/lib/taos";
char tsScriptDir[TSDB_FILENAME_LEN] = "/etc/taos";
char tsVnodeBakDir[TSDB_FILENAME_LEN] = {0};
/*
* minimum scale for whole system, millisecond by default
......
......@@ -464,6 +464,29 @@ void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems) {
}
}
static uint8_t nullBool = TSDB_DATA_BOOL_NULL;
static uint8_t nullTinyInt = TSDB_DATA_TINYINT_NULL;
static uint16_t nullSmallInt = TSDB_DATA_SMALLINT_NULL;
static uint32_t nullInt = TSDB_DATA_INT_NULL;
static uint64_t nullBigInt = TSDB_DATA_BIGINT_NULL;
static uint32_t nullFloat = TSDB_DATA_FLOAT_NULL;
static uint64_t nullDouble = TSDB_DATA_DOUBLE_NULL;
static union {
tstr str;
char pad[sizeof(tstr) + 4];
} nullBinary = {.str = {.len = 1}}, nullNchar = {.str = {.len = 4}};
static void *nullValues[] = {
&nullBool, &nullTinyInt, &nullSmallInt, &nullInt, &nullBigInt,
&nullFloat, &nullDouble, &nullBinary, &nullBigInt, &nullNchar,
};
void *getNullValue(int32_t type) {
assert(type >= TSDB_DATA_TYPE_BOOL && type <= TSDB_DATA_TYPE_NCHAR);
return nullValues[type - 1];
}
void assignVal(char *val, const char *src, int32_t len, int32_t type) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
......
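Because the type enum is 1-based, nullValues[] is indexed with type - 1; TSDB_DATA_TYPE_TIMESTAMP shares the BIGINT sentinel, and the two var types return a tstr header with its length pre-set. A small usage check (the sentinels are shared statics and must be treated as read-only):

    void *v = getNullValue(TSDB_DATA_TYPE_TIMESTAMP);  // same pointer as &nullBigInt
    assert(*(uint64_t *)v == TSDB_DATA_BIGINT_NULL);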
......@@ -256,30 +256,13 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
SDataRow trow = (SDataRow)pBlk->data;
tdInitDataRow(trow, pSchema);
union {
char buf[sizeof(int64_t)];
tstr str;
} nullVal;
for (int32_t i = 0; i < pSchema->numOfCols; i++) {
STColumn *c = pSchema->columns + i;
char* val = (char*)row[i];
if (IS_VAR_DATA_TYPE(c->type)) {
if (val == NULL) {
val = nullVal.buf;
if (c->type == TSDB_DATA_TYPE_BINARY) {
setNull(nullVal.str.data, TSDB_DATA_TYPE_BINARY, 1);
nullVal.str.len = 1;
} else {
setNull(nullVal.str.data, TSDB_DATA_TYPE_NCHAR, 4);
nullVal.str.len = 4;
}
} else {
val -= sizeof(VarDataLenT);
}
} else if (val == NULL) {
val = nullVal.buf;
setNull(val, c->type, c->bytes);
void* val = row[i];
if (val == NULL) {
val = getNullValue(c->type);
} else if (IS_VAR_DATA_TYPE(c->type)) {
val = ((char*)val) - sizeof(VarDataLenT);
}
tdAppendColVal(trow, val, c->type, c->bytes, c->offset);
}
......
......@@ -171,6 +171,7 @@ static int32_t dnodeInitStorage() {
sprintf(tsMnodeDir, "%s/mnode", tsDataDir);
sprintf(tsVnodeDir, "%s/vnode", tsDataDir);
sprintf(tsDnodeDir, "%s/dnode", tsDataDir);
sprintf(tsVnodeBakDir, "%s/vnode_bak", tsDataDir);
//TODO(dengyihao): no need to init here
if (dnodeCreateDir(tsMnodeDir) < 0) {
......@@ -186,6 +187,10 @@ static int32_t dnodeInitStorage() {
dError("failed to create dir: %s, reason: %s", tsDnodeDir, strerror(errno));
return -1;
}
if (dnodeCreateDir(tsVnodeBakDir) < 0) {
dError("failed to create dir: %s, reason: %s", tsVnodeBakDir, strerror(errno));
return -1;
}
dnodeCheckDataDirOpenned(tsDnodeDir);
......
......@@ -165,6 +165,7 @@ bool isNull(const char *val, int32_t type);
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
......
......@@ -26,6 +26,7 @@ extern "C" {
#include "taosdef.h"
#include "taoserror.h"
#include "trpc.h"
#include "tdataformat.h"
// message type
......@@ -674,7 +675,7 @@ typedef struct SMultiTableMeta {
typedef struct {
int32_t dataLen;
char name[TSDB_TABLE_ID_LEN];
char data[TSDB_MAX_TAGS_LEN];
char data[TSDB_MAX_TAGS_LEN + TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * TSDB_MAX_TAGS];
} STagData;
/*
......
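STagData.data grows because it now carries a serialized SKVRow rather than raw fixed-offset tag values. The bound, using only macros from this diff:

    // worst-case size of a serialized KV tag row:
    //   kvRowLen(row) <= TD_KV_ROW_HEAD_SIZE              (int16_t len + int16_t ncols)
    //                  + TSDB_MAX_TAGS * sizeof(SColIdx)  (one index entry per tag)
    //                  + TSDB_MAX_TAGS_LEN                (the encoded tag values)
    // which is exactly the new capacity of STagData.data.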
......@@ -47,8 +47,8 @@ typedef struct SRpcMsg {
void *pCont;
int contLen;
int32_t code;
void *handle;
void *ahandle; //app handle set by client, for debug purpose
void *handle; // rpc handle returned to app
void *ahandle; // app handle set by client
} SRpcMsg;
typedef struct SRpcInit {
......@@ -78,11 +78,11 @@ void rpcClose(void *);
void *rpcMallocCont(int contLen);
void rpcFreeCont(void *pCont);
void *rpcReallocCont(void *ptr, int contLen);
void *rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg);
void rpcSendRequest(void *thandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg);
void rpcSendResponse(const SRpcMsg *pMsg);
void rpcSendRedirectRsp(void *pConn, const SRpcIpSet *pIpSet);
int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo);
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pReq, SRpcMsg *pRsp);
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pReq, SRpcMsg *pRsp);
int rpcReportProgress(void *pConn, char *pCont, int contLen);
void rpcCancelRequest(void *pContext);
......
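With the split, ahandle is echoed back untouched in the response while handle doubles as an out-parameter: rpcSendRequest() stores the request context through it so the app can cancel later (see the *((void **)pMsg->handle) = pContext line further down). A sketch of the calling convention, assembled from the tscSendMsgToServer hunk above:

    SRpcMsg rpcMsg = {
      .msgType = pSql->cmd.msgType,
      .pCont   = pMsg,
      .contLen = pSql->cmd.payloadLen,
      .ahandle = pSql,            // returned as-is in the response message
      .handle  = &pSql->pRpcCtx,  // filled in with the cancellable request context
      .code    = 0
    };
    rpcSendRequest(pObj->pDnodeConn, &pSql->ipList, &rpcMsg);
    // later, e.g. when the query is killed:
    if (pSql->pRpcCtx != NULL) rpcCancelRequest(pSql->pRpcCtx);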
......@@ -808,27 +808,31 @@ void *readMetric(void *sarg) {
}
void queryDB(TAOS *taos, char *command) {
int i = 5;
int i;
TAOS_RES *pSql = NULL;
int32_t code = -1;
while (i > 0 && code != 0) {
int32_t code = -1;
for (i = 0; i < 5; i++) {
if (NULL != pSql) {
taos_free_result(pSql);
pSql = NULL;
}
pSql = taos_query(taos, command);
code = taos_errno(pSql);
taos_free_result(pSql);
pSql = NULL;
if (code == 0) {
if (0 == code) {
break;
}
i--;
}
}
if (code != 0) {
fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
taos_free_result(pSql);
taos_close(taos);
exit(EXIT_FAILURE);
}
taos_free_result(pSql);
}
// sync insertion
......
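Since the old and new lines are interleaved above, here is the post-patch queryDB() in one piece (a reconstruction of the hunk, not new behavior): it frees the previous result before each retry and keeps the last pSql alive so taos_errstr() can report the final failure.

    void queryDB(TAOS *taos, char *command) {
      int i;
      TAOS_RES *pSql = NULL;
      int32_t code = -1;
      for (i = 0; i < 5; i++) {
        if (NULL != pSql) {        // drop the result of the previous attempt
          taos_free_result(pSql);
          pSql = NULL;
        }
        pSql = taos_query(taos, command);
        code = taos_errno(pSql);
        if (0 == code) break;      // success: stop retrying
      }
      if (code != 0) {             // all five attempts failed
        fprintf(stderr, "Failed to run %s, reason: %s\n", command, taos_errstr(pSql));
        taos_free_result(pSql);
        taos_close(taos);
        exit(EXIT_FAILURE);
      }
      taos_free_result(pSql);
    }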
......@@ -264,7 +264,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
strcpy(pMdCfgDnode->config, pCmCfgDnode->config);
SRpcMsg rpcMdCfgDnodeMsg = {
.handle = 0,
.ahandle = 0,
.code = 0,
.msgType = TSDB_MSG_TYPE_MD_CONFIG_DNODE,
.pCont = pMdCfgDnode,
......
......@@ -1574,7 +1574,7 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) {
SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
SRpcMsg rpcMsg = {
.handle = pMsg,
.ahandle = pMsg,
.pCont = pMDCreate,
.contLen = htonl(pMDCreate->contLen),
.code = 0,
......@@ -1751,7 +1751,7 @@ static int32_t mnodeProcessDropChildTableMsg(SMnodeMsg *pMsg) {
mInfo("app:%p:%p, table:%s, send drop ctable msg", pMsg->rpcMsg.ahandle, pMsg, pDrop->tableId);
SRpcMsg rpcMsg = {
.handle = pMsg,
.ahandle = pMsg,
.pCont = pDrop,
.contLen = sizeof(SMDDropTableMsg),
.code = 0,
......@@ -1799,7 +1799,7 @@ static int32_t mnodeAlterNormalTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SRpcIpSet ipSet = mnodeGetIpSetFromVgroup(pMsg->pVgroup);
SRpcMsg rpcMsg = {
.handle = pMsg,
.ahandle = pMsg,
.pCont = pMDCreate,
.contLen = htonl(pMDCreate->contLen),
.code = 0,
......@@ -2144,9 +2144,9 @@ static int32_t mnodeProcessTableCfgMsg(SMnodeMsg *pMsg) {
// handle drop child response
static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
if (rpcMsg->handle == NULL) return;
if (rpcMsg->ahandle == NULL) return;
SMnodeMsg *mnodeMsg = rpcMsg->handle;
SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
mnodeMsg->received++;
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
......@@ -2195,9 +2195,9 @@ static void mnodeProcessDropChildTableRsp(SRpcMsg *rpcMsg) {
* if failed, drop the table cached
*/
static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
if (rpcMsg->handle == NULL) return;
if (rpcMsg->ahandle == NULL) return;
SMnodeMsg *mnodeMsg = rpcMsg->handle;
SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
mnodeMsg->received++;
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
......@@ -2238,9 +2238,9 @@ static void mnodeProcessCreateChildTableRsp(SRpcMsg *rpcMsg) {
}
static void mnodeProcessAlterTableRsp(SRpcMsg *rpcMsg) {
if (rpcMsg->handle == NULL) return;
if (rpcMsg->ahandle == NULL) return;
SMnodeMsg *mnodeMsg = rpcMsg->handle;
SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
mnodeMsg->received++;
SChildTableObj *pTable = (SChildTableObj *)mnodeMsg->pTable;
......
......@@ -652,7 +652,7 @@ SRpcIpSet mnodeGetIpSetFromIp(char *ep) {
void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcIpSet *ipSet, void *ahandle) {
SMDCreateVnodeMsg *pCreate = mnodeBuildCreateVnodeMsg(pVgroup);
SRpcMsg rpcMsg = {
.handle = ahandle,
.ahandle = ahandle,
.pCont = pCreate,
.contLen = pCreate ? sizeof(SMDCreateVnodeMsg) : 0,
.code = 0,
......@@ -673,9 +673,9 @@ void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle) {
}
static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
if (rpcMsg->handle == NULL) return;
if (rpcMsg->ahandle == NULL) return;
SMnodeMsg *mnodeMsg = rpcMsg->handle;
SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
mnodeMsg->received++;
if (rpcMsg->code == TSDB_CODE_SUCCESS) {
mnodeMsg->successed++;
......@@ -686,7 +686,7 @@ static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) {
SVgObj *pVgroup = mnodeMsg->pVgroup;
mDebug("vgId:%d, create vnode rsp received, result:%s received:%d successed:%d expected:%d, thandle:%p ahandle:%p",
pVgroup->vgId, tstrerror(rpcMsg->code), mnodeMsg->received, mnodeMsg->successed, mnodeMsg->expected,
mnodeMsg->rpcMsg.handle, rpcMsg->handle);
mnodeMsg->rpcMsg.handle, rpcMsg->ahandle);
if (mnodeMsg->received != mnodeMsg->expected) return;
......@@ -718,7 +718,7 @@ static SMDDropVnodeMsg *mnodeBuildDropVnodeMsg(int32_t vgId) {
void mnodeSendDropVnodeMsg(int32_t vgId, SRpcIpSet *ipSet, void *ahandle) {
SMDDropVnodeMsg *pDrop = mnodeBuildDropVnodeMsg(vgId);
SRpcMsg rpcMsg = {
.handle = ahandle,
.ahandle = ahandle,
.pCont = pDrop,
.contLen = pDrop ? sizeof(SMDDropVnodeMsg) : 0,
.code = 0,
......@@ -737,10 +737,10 @@ static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle) {
}
static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg) {
mDebug("drop vnode rsp is received, handle:%p", rpcMsg->handle);
if (rpcMsg->handle == NULL) return;
mDebug("drop vnode rsp is received, handle:%p", rpcMsg->ahandle);
if (rpcMsg->ahandle == NULL) return;
SMnodeMsg *mnodeMsg = rpcMsg->handle;
SMnodeMsg *mnodeMsg = rpcMsg->ahandle;
mnodeMsg->received++;
if (rpcMsg->code == TSDB_CODE_SUCCESS) {
mnodeMsg->code = rpcMsg->code;
......@@ -750,7 +750,7 @@ static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg) {
SVgObj *pVgroup = mnodeMsg->pVgroup;
mDebug("vgId:%d, drop vnode rsp received, result:%s received:%d successed:%d expected:%d, thandle:%p ahandle:%p",
pVgroup->vgId, tstrerror(rpcMsg->code), mnodeMsg->received, mnodeMsg->successed, mnodeMsg->expected,
mnodeMsg->rpcMsg.handle, rpcMsg->handle);
mnodeMsg->rpcMsg.handle, rpcMsg->ahandle);
if (mnodeMsg->received != mnodeMsg->expected) return;
......
......@@ -236,6 +236,9 @@ void taosSetCoreDump();
void taosBlockSIGPIPE();
int tSystem(const char * cmd) ;
#ifdef _ALPINE
typedef int(*__compar_fn_t)(const void *, const void *);
void error (int, int, const char *);
......
......@@ -241,3 +241,32 @@ void taosBlockSIGPIPE() {
uError("failed to block SIGPIPE");
}
}
int tSystem(const char * cmd)
{
FILE * fp;
int res;
char buf[1024];
if (cmd == NULL) {
uError("tSystem cmd is NULL!\n");
return -1;
}
if ((fp = popen(cmd, "r") ) == NULL) {
uError("popen cmd:%s error: %s/n", cmd, strerror(errno));
return -1;
} else {
while(fgets(buf, sizeof(buf), fp)) {
uDebug("popen result:%s", buf);
}
if ((res = pclose(fp)) == -1) {
uError("close popen file pointer fp error!\n");
} else {
uDebug("popen res is :%d\n", res);
}
return res;
}
}
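tSystem() is a popen()-based replacement for system() that streams the child's stdout into the debug log and returns pclose()'s status (or -1 on setup failure); taosMvDir() later in this diff is its first caller. A trivial usage sketch with a hypothetical path:

    if (tSystem("mv /tmp/vnode100 /tmp/vnode_bak/") != 0) {
      uError("failed to move vnode dir\n");
    }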
......@@ -141,10 +141,15 @@ HttpContext *httpGetContext(void *ptr) {
void httpReleaseContext(HttpContext *pContext) {
int32_t refCount = atomic_sub_fetch_32(&pContext->refCount, 1);
assert(refCount >= 0);
httpDebug("context:%p, fd:%d, is releasd, refCount:%d", pContext, pContext->fd, refCount);
httpDebug("context:%p, is releasd, refCount:%d", pContext, refCount);
HttpContext **ppContext = pContext->ppContext;
taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
if (tsHttpServer.contextCache != NULL) {
taosCacheRelease(tsHttpServer.contextCache, (void **)(&ppContext), false);
} else {
httpDebug("context:%p, won't be destroyed for cache is already released", pContext);
// httpDestroyContext((void **)(&ppContext));
}
}
bool httpInitContext(HttpContext *pContext) {
......
......@@ -157,7 +157,7 @@ bool httpGetHttpMethod(HttpContext* pContext) {
pParser->method.pos[pParser->method.len] = 0;
pParser->pLast = pSeek + 1;
httpDebug("context:%p, fd:%d, ip:%s, httpMethod:%s", pContext, pContext->fd, pContext->ipstr, pParser->method.pos);
httpTrace("context:%p, fd:%d, ip:%s, httpMethod:%s", pContext, pContext->fd, pContext->ipstr, pParser->method.pos);
return true;
}
......@@ -186,23 +186,23 @@ bool httpParseHead(HttpContext* pContext) {
HttpParser* pParser = &pContext->parser;
if (strncasecmp(pParser->pLast, "Content-Length: ", 16) == 0) {
pParser->data.len = (int32_t)atoi(pParser->pLast + 16);
httpDebug("context:%p, fd:%d, ip:%s, Content-Length:%d", pContext, pContext->fd, pContext->ipstr,
httpTrace("context:%p, fd:%d, ip:%s, Content-Length:%d", pContext, pContext->fd, pContext->ipstr,
pParser->data.len);
} else if (strncasecmp(pParser->pLast, "Accept-Encoding: ", 17) == 0) {
if (tsHttpEnableCompress && strstr(pParser->pLast + 17, "gzip") != NULL) {
pContext->acceptEncoding = HTTP_COMPRESS_GZIP;
httpDebug("context:%p, fd:%d, ip:%s, Accept-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
} else {
pContext->acceptEncoding = HTTP_COMPRESS_IDENTITY;
httpDebug("context:%p, fd:%d, ip:%s, Accept-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, Accept-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
}
} else if (strncasecmp(pParser->pLast, "Content-Encoding: ", 18) == 0) {
if (strstr(pParser->pLast + 18, "gzip") != NULL) {
pContext->contentEncoding = HTTP_COMPRESS_GZIP;
httpDebug("context:%p, fd:%d, ip:%s, Content-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, Content-Encoding:gzip", pContext, pContext->fd, pContext->ipstr);
} else {
pContext->contentEncoding = HTTP_COMPRESS_IDENTITY;
httpDebug("context:%p, fd:%d, ip:%s, Content-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, Content-Encoding:identity", pContext, pContext->fd, pContext->ipstr);
}
} else if (strncasecmp(pParser->pLast, "Connection: ", 12) == 0) {
if (strncasecmp(pParser->pLast + 12, "Keep-Alive", 10) == 0) {
......@@ -210,7 +210,7 @@ bool httpParseHead(HttpContext* pContext) {
} else {
pContext->httpKeepAlive = HTTP_KEEPALIVE_DISABLE;
}
httpDebug("context:%p, fd:%d, ip:%s, keepAlive:%d", pContext, pContext->fd, pContext->ipstr,
httpTrace("context:%p, fd:%d, ip:%s, keepAlive:%d", pContext, pContext->fd, pContext->ipstr,
pContext->httpKeepAlive);
} else if (strncasecmp(pParser->pLast, "Transfer-Encoding: ", 19) == 0) {
if (strncasecmp(pParser->pLast + 19, "chunked", 7) == 0) {
......@@ -281,7 +281,7 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) {
httpParseChunkedBody(pContext, pParser, false);
return HTTP_CHECK_BODY_SUCCESS;
} else {
httpDebug("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr);
if (!httpReadDataImp(pContext)) {
httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr);
return HTTP_CHECK_BODY_ERROR;
......@@ -299,7 +299,7 @@ int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) {
httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR);
return HTTP_CHECK_BODY_ERROR;
} else if (dataReadLen < pParser->data.len) {
httpDebug("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read",
pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len);
return HTTP_CHECK_BODY_CONTINUE;
} else {
......@@ -313,9 +313,9 @@ bool httpParseRequest(HttpContext* pContext) {
return true;
}
httpDebug("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s",
pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds,
pContext->parser.bufsize, pContext->parser.buffer);
httpTraceDump("context:%p, fd:%d, ip:%s, thread:%s, numOfFds:%d, read size:%d, raw data:\n%s", pContext, pContext->fd,
pContext->ipstr, pContext->pThread->label, pContext->pThread->numOfFds, pContext->parser.bufsize,
pContext->parser.buffer);
if (!httpGetHttpMethod(pContext)) {
return false;
......
......@@ -76,8 +76,8 @@ int httpWriteBuf(struct HttpContext *pContext, const char *buf, int sz) {
httpError("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, failed to send response:\n%s",
pContext, pContext->fd, pContext->ipstr, sz, writeSz, buf);
} else {
httpDebug("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, response:\n%s",
pContext, pContext->fd, pContext->ipstr, sz, writeSz, buf);
httpTrace("context:%p, fd:%d, ip:%s, dataSize:%d, writeSize:%d, response:\n%s", pContext, pContext->fd,
pContext->ipstr, sz, writeSz, buf);
}
return writeSz;
......@@ -99,7 +99,7 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
uint64_t srcLen = (uint64_t) (buf->lst - buf->buf);
if (buf->pContext->fd <= 0) {
httpDebug("context:%p, fd:%d, ip:%s, write json body error", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, write json body error", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
buf->pContext->fd = -1;
}
......@@ -113,11 +113,11 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
if (buf->pContext->acceptEncoding == HTTP_COMPRESS_IDENTITY) {
if (buf->lst == buf->buf) {
httpDebug("context:%p, fd:%d, ip:%s, no data need dump", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, no data need dump", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
return 0; // there is no data to dump.
} else {
int len = sprintf(sLen, "%lx\r\n", srcLen);
httpDebug("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s",
httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", response:\n%s",
buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, buf->buf);
httpWriteBufNoTrace(buf->pContext, sLen, len);
remain = httpWriteBufNoTrace(buf->pContext, buf->buf, (int) srcLen);
......@@ -129,12 +129,12 @@ int httpWriteJsonBufBody(JsonBuf* buf, bool isTheLast) {
if (ret == 0) {
if (compressBufLen > 0) {
int len = sprintf(sLen, "%x\r\n", compressBufLen);
httpDebug("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s",
httpTrace("context:%p, fd:%d, ip:%s, write body, chunkSize:%" PRIu64 ", compressSize:%d, last:%d, response:\n%s",
buf->pContext, buf->pContext->fd, buf->pContext->ipstr, srcLen, compressBufLen, isTheLast, buf->buf);
httpWriteBufNoTrace(buf->pContext, sLen, len);
remain = httpWriteBufNoTrace(buf->pContext, (const char *) compressBuf, (int) compressBufLen);
} else {
httpDebug("context:%p, fd:%d, ip:%s, last:%d, compress already dumped, response:\n%s",
httpTrace("context:%p, fd:%d, ip:%s, last:%d, compress already dumped, response:\n%s",
buf->pContext, buf->pContext->fd, buf->pContext->ipstr, isTheLast, buf->buf);
return 0; // there is no data to dump.
}
......@@ -173,7 +173,7 @@ void httpWriteJsonBufHead(JsonBuf* buf) {
void httpWriteJsonBufEnd(JsonBuf* buf) {
if (buf->pContext->fd <= 0) {
httpDebug("context:%p, fd:%d, ip:%s, json buf fd is 0", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
httpTrace("context:%p, fd:%d, ip:%s, json buf fd is 0", buf->pContext, buf->pContext->fd, buf->pContext->ipstr);
buf->pContext->fd = -1;
}
......
......@@ -66,8 +66,6 @@ void httpCleanUpConnect() {
}
}
tfree(pServer->pThreads);
pServer->pThreads = NULL;
httpDebug("http server:%s is cleaned up", pServer->label);
}
......
......@@ -95,11 +95,13 @@ void httpCleanUpSystem() {
httpInfo("http server cleanup");
httpStopSystem();
httpCleanUpConnect();
httpCleanupContexts();
httpCleanUpSessions();
httpCleanUpConnect();
pthread_mutex_destroy(&tsHttpServer.serverMutex);
tfree(tsHttpServer.pThreads);
tsHttpServer.pThreads = NULL;
tsHttpServer.status = HTTP_SERVER_CLOSED;
}
......
......@@ -354,13 +354,13 @@ void *rpcReallocCont(void *ptr, int contLen) {
return start + sizeof(SRpcReqContext) + sizeof(SRpcHead);
}
void *rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg) {
void rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, SRpcMsg *pMsg) {
SRpcInfo *pRpc = (SRpcInfo *)shandle;
SRpcReqContext *pContext;
int contLen = rpcCompressRpcMsg(pMsg->pCont, pMsg->contLen);
pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
pContext->ahandle = pMsg->handle;
pContext->ahandle = pMsg->ahandle;
pContext->pRpc = (SRpcInfo *)shandle;
pContext->ipSet = *pIpSet;
pContext->contLen = contLen;
......@@ -380,9 +380,12 @@ void *rpcSendRequest(void *shandle, const SRpcIpSet *pIpSet, const SRpcMsg *pMsg
|| type == TSDB_MSG_TYPE_CM_SHOW )
pContext->connType = RPC_CONN_TCPC;
// set the handle to pContext, so app can cancel the request
if (pMsg->handle) *((void **)pMsg->handle) = pContext;
rpcSendReqToServer(pRpc, pContext);
return pContext;
return;
}
void rpcSendResponse(const SRpcMsg *pRsp) {
......@@ -483,7 +486,7 @@ int rpcGetConnInfo(void *thandle, SRpcConnInfo *pInfo) {
return 0;
}
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, const SRpcMsg *pMsg, SRpcMsg *pRsp) {
void rpcSendRecv(void *shandle, SRpcIpSet *pIpSet, SRpcMsg *pMsg, SRpcMsg *pRsp) {
SRpcReqContext *pContext;
pContext = (SRpcReqContext *) (pMsg->pCont-sizeof(SRpcHead)-sizeof(SRpcReqContext));
......@@ -638,7 +641,7 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc) {
pConn->pRpc = pRpc;
pConn->sid = sid;
pConn->tranId = (uint16_t)(rand() & 0xFFFF);
pConn->tranId = (uint16_t)(random() & 0xFFFF);
pConn->ownId = htonl(pConn->sid);
pConn->linkUid = (uint32_t)((int64_t)pConn + (int64_t)getpid());
pConn->spi = pRpc->spi;
......@@ -1051,7 +1054,7 @@ static void rpcProcessIncomingMsg(SRpcConn *pConn, SRpcHead *pHead) {
} else {
// it's a response
SRpcReqContext *pContext = pConn->pContext;
rpcMsg.handle = pContext->ahandle;
rpcMsg.handle = pContext;
pConn->pContext = NULL;
// for UDP, port may be changed by server, the port in ipSet shall be used for cache
......@@ -1255,7 +1258,7 @@ static void rpcProcessConnError(void *param, void *id) {
if (pContext->numOfTry >= pContext->ipSet.numOfIps) {
rpcMsg.msgType = pContext->msgType+1;
rpcMsg.handle = pContext->ahandle;
rpcMsg.ahandle = pContext->ahandle;
rpcMsg.code = pContext->code;
rpcMsg.pCont = NULL;
rpcMsg.contLen = 0;
......
......@@ -33,7 +33,7 @@ typedef struct {
} SInfo;
static void processResponse(SRpcMsg *pMsg, SRpcIpSet *pIpSet) {
SInfo *pInfo = (SInfo *)pMsg->handle;
SInfo *pInfo = (SInfo *)pMsg->ahandle;
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen, pMsg->code);
if (pIpSet) pInfo->ipSet = *pIpSet;
......@@ -46,7 +46,7 @@ static int tcount = 0;
static void *sendRequest(void *param) {
SInfo *pInfo = (SInfo *)param;
SRpcMsg rpcMsg;
SRpcMsg rpcMsg = {0};
tDebug("thread:%d, start to send request", pInfo->index);
......@@ -54,7 +54,7 @@ static void *sendRequest(void *param) {
pInfo->num++;
rpcMsg.pCont = rpcMallocCont(pInfo->msgSize);
rpcMsg.contLen = pInfo->msgSize;
rpcMsg.handle = pInfo;
rpcMsg.ahandle = pInfo;
rpcMsg.msgType = 1;
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
rpcSendRequest(pInfo->pRpc, &pInfo->ipSet, &rpcMsg);
......
......@@ -233,26 +233,10 @@ STableCfg *tsdbCreateTableCfgFromMsg(SMDCreateTableMsg *pMsg) {
if (tsdbTableSetSName(pCfg, pMsg->superTableId, true) < 0) goto _err;
if (tsdbTableSetSuperUid(pCfg, htobe64(pMsg->superTableUid)) < 0) goto _err;
// Decode tag values
if (pMsg->tagDataLen) {
int accBytes = 0;
int32_t tagDataLen = htonl(pMsg->tagDataLen);
if (tagDataLen) {
char *pTagData = pMsg->data + (numOfCols + numOfTags) * sizeof(SSchema);
SKVRowBuilder kvRowBuilder = {0};
if (tdInitKVRowBuilder(&kvRowBuilder) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
for (int i = numOfCols; i < numOfCols + numOfTags; i++) {
if (tdAddColToKVRow(&kvRowBuilder, htons(pSchema[i].colId), pSchema[i].type, pTagData + accBytes) < 0) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
}
accBytes += htons(pSchema[i].bytes);
}
tsdbTableSetTagValue(pCfg, tdGetKVRowFromBuilder(&kvRowBuilder), false);
tdDestroyKVRowBuilder(&kvRowBuilder);
tsdbTableSetTagValue(pCfg, pTagData, true);
}
}
......@@ -620,6 +604,10 @@ static char *getTagIndexKey(const void *pData) {
STSchema *pSchema = tsdbGetTableTagSchema(pTable);
STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN);
void * res = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
if (res == NULL) {
// treat the column as NULL if we cannot find it
res = getNullValue(pCol->type);
}
return res;
}
......
......@@ -184,6 +184,7 @@ uint32_t ip2uint(const char *const ip_addr);
void taosRemoveDir(char *rootDir);
int tmkdir(const char *pathname, mode_t mode);
void taosMvDir(char* destDir, char *srcDir);
#define TAOS_ALLOC_MODE_DEFAULT 0
#define TAOS_ALLOC_MODE_RANDOM_FAIL 1
......
......@@ -119,7 +119,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheDataNo
int32_t size = pNode->size;
taosHashRemove(pCacheObj->pHashTable, pNode->key, pNode->keySize);
uDebug("key:%s is removed from cache,total:%" PRId64 ",size:%dbytes", pNode->key, pCacheObj->totalSize, size);
uDebug("key:%s, is removed from cache, total:%" PRId64 " size:%d bytes", pNode->key, pCacheObj->totalSize, size);
if (pCacheObj->freeFp) pCacheObj->freeFp(pNode->data);
free(pNode);
}
......@@ -288,14 +288,14 @@ void *taosCachePut(SCacheObj *pCacheObj, const char *key, const void *pData, siz
if (NULL != pNode) {
pCacheObj->totalSize += pNode->size;
uDebug("key:%s %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%" PRId64 ", size:%" PRId64 " bytes",
uDebug("key:%s, %p added into cache, added:%" PRIu64 ", expire:%" PRIu64 ", total:%" PRId64 ", size:%" PRId64 " bytes",
key, pNode, pNode->addedTime, pNode->expiredTime, pCacheObj->totalSize, dataSize);
} else {
uError("key:%s failed to added into cache, out of memory", key);
uError("key:%s, failed to added into cache, out of memory", key);
}
} else { // old data exists, update the node
pNode = taosUpdateCacheImpl(pCacheObj, pOld, key, keyLen, pData, dataSize, duration * 1000L);
uDebug("key:%s %p exist in cache, updated", key, pNode);
uDebug("key:%s, %p exist in cache, updated", key, pNode);
}
__cache_unlock(pCacheObj);
......@@ -321,10 +321,10 @@ void *taosCacheAcquireByName(SCacheObj *pCacheObj, const char *key) {
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%s is retrieved from cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
uDebug("key:%s, is retrieved from cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%s not in cache, retrieved failed", key);
uDebug("key:%s, not in cache, retrieved failed", key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
......@@ -350,10 +350,10 @@ void* taosCacheUpdateExpireTimeByName(SCacheObj *pCacheObj, const char *key, uin
if (ptNode != NULL) {
atomic_add_fetch_32(&pCacheObj->statistics.hitCount, 1);
uDebug("key:%s expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
uDebug("key:%s, expireTime is updated in cache, %p refcnt:%d", key, (*ptNode), T_REF_VAL_GET(*ptNode));
} else {
atomic_add_fetch_32(&pCacheObj->statistics.missCount, 1);
uDebug("key:%s not in cache, retrieved failed", key);
uDebug("key:%s, not in cache, retrieved failed", key);
}
atomic_add_fetch_32(&pCacheObj->statistics.totalAccess, 1);
......@@ -410,13 +410,13 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) {
SCacheDataNode *pNode = (SCacheDataNode *)((char *)(*data) - offset);
if (pNode->signature != (uint64_t)pNode) {
uError("key: %p release invalid cache data", pNode);
uError("%p release invalid cache data", pNode);
return;
}
*data = NULL;
int32_t ref = T_REF_DEC(pNode);
uDebug("%p data released, refcnt:%d", pNode, ref);
uDebug("key:%s, is released, %p refcnt:%d", pNode->key, pNode, ref);
if (_remove) {
__cache_wr_lock(pCacheObj);
......@@ -501,7 +501,7 @@ void taosAddToTrash(SCacheObj *pCacheObj, SCacheDataNode *pNode) {
pNode->inTrashCan = true;
pCacheObj->numOfElemsInTrash++;
uDebug("key:%s %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
uDebug("key:%s, %p move to trash, numOfElem in trash:%d", pNode->key, pNode, pCacheObj->numOfElemsInTrash);
}
void taosRemoveFromTrashCan(SCacheObj *pCacheObj, STrashElem *pElem) {
......@@ -549,7 +549,7 @@ void taosTrashCanEmpty(SCacheObj *pCacheObj, bool force) {
}
if (force || (T_REF_VAL_GET(pElem->pData) == 0)) {
uDebug("key:%s %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
uDebug("key:%s, %p removed from trash. numOfElem in trash:%d", pElem->pData->key, pElem->pData,
pCacheObj->numOfElemsInTrash - 1);
STrashElem *p = pElem;
......@@ -570,8 +570,11 @@ void doCleanupDataCache(SCacheObj *pCacheObj) {
while (taosHashIterNext(pIter)) {
SCacheDataNode *pNode = *(SCacheDataNode **)taosHashIterGet(pIter);
// if (pNode->expiredTime <= expiredTime && T_REF_VAL_GET(pNode) <= 0) {
taosCacheReleaseNode(pCacheObj, pNode);
//}
if (T_REF_VAL_GET(pNode) <= 0) {
taosCacheReleaseNode(pCacheObj, pNode);
} else {
uDebug("key:%s, will not remove from cache, refcnt:%d", pNode->key, T_REF_VAL_GET(pNode));
}
}
taosHashDestroyIter(pIter);
......
......@@ -799,3 +799,13 @@ int tmkdir(const char *path, mode_t mode) {
if (code < 0 && errno == EEXIST) code = 0;
return code;
}
void taosMvDir(char* destDir, char *srcDir) {
char shellCmd[1024+1] = {0};
//(void)snprintf(shellCmd, 1024, "cp -rf %s %s", srcDir, destDir);
(void)snprintf(shellCmd, 1024, "mv %s %s", srcDir, destDir);
tSystem(shellCmd);
uInfo("shell cmd:%s is executed", shellCmd);
}
......@@ -347,6 +347,7 @@ void vnodeRelease(void *pVnodeRaw) {
if (pVnode->status == TAOS_VN_STATUS_DELETING) {
char rootDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, vgId);
taosMvDir(tsVnodeBakDir, rootDir);
taosRemoveDir(rootDir);
}
......
#!/bin/bash
WORK_DIR=/mnt/root
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
......@@ -16,6 +18,17 @@ function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") &&
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function setMaxTablesPerVnode {
echo "/etc/taos/taos.cfg maxTablesPerVnode will be set to $1"
hasText=`grep "maxTablesPerVnode" /etc/taos/taos.cfg`
if [[ -z "$hasText" ]]; then
echo "maxTablesPerVnode $1" >> /etc/taos/taos.cfg
else
sed -i 's/^maxTablesPerVnode.*$/maxTablesPerVnode '"$1"'/g' /etc/taos/taos.cfg
fi
}
function setMaxConnections {
echo "/etc/taos/taos.cfg maxConnection will be set to $1"
......@@ -27,6 +40,28 @@ function setMaxConnections {
fi
}
function setQDebugFlag {
echo "/etc/taos/taos.cfg qDebugFlag will be set to $1"
hasText=`grep -w "qDebugFlag" /etc/taos/taos.cfg`
if [[ -z "$hasText" ]]; then
echo "qDebugFlag $1" >> /etc/taos/taos.cfg
else
sed -i 's/^qDebugFlag.*$/qDebugFlag '"$1"'/g' /etc/taos/taos.cfg
fi
}
function setDebugFlag {
echo "/etc/taos/taos.cfg DebugFlag will be set to $1"
hasText=`grep -w "DebugFlag" /etc/taos/taos.cfg`
if [[ -z "$hasText" ]]; then
echo "DebugFlag $1" >> /etc/taos/taos.cfg
else
sed -i 's/^DebugFlag.*$/DebugFlag '"$1"'/g' /etc/taos/taos.cfg
fi
}
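setMaxTablesPerVnode, setQDebugFlag and setDebugFlag all repeat the same append-or-replace edit of /etc/taos/taos.cfg; a hypothetical generic helper (not part of this commit) would collapse them:

    function setTaosCfg {  # usage: setTaosCfg <key> <value>
      local key=$1 val=$2 cfg=/etc/taos/taos.cfg
      echo "$cfg $key will be set to $val"
      if grep -qw "^$key" $cfg; then
        sed -i 's/^'"$key"'.*$/'"$key"' '"$val"'/g' $cfg
      else
        echo "$key $val" >> $cfg
      fi
    }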
function setWal {
echo "/etc/taos/taos.cfg walLevel will be set to $1"
......@@ -47,9 +82,10 @@ function collectSysInfo {
}
function buildTDengine {
cd /root/TDengine
echoInfo "Build TDengine"
cd $WORK_DIR/TDengine
git remote update
git remote update > /dev/null
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
LOCAL_COMMIT=`git rev-parse --short @`
......@@ -59,29 +95,17 @@ function buildTDengine {
echo "repo up-to-date"
else
echo "repo need to pull"
git pull
git pull > /dev/null
LOCAL_COMMIT=`git rev-parse --short @`
cd debug
rm -rf *
cmake ..
cmake .. > /dev/null
make > /dev/null
make install
fi
}
function restartTaosd {
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
taosd 2>&1 > /dev/null &
sleep 10
}
function sendReport {
receiver="sdsang@taosdata.com, sangshuduo@gmail.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
......@@ -93,57 +117,90 @@ function sendReport {
(cat - && uuencode perftest-13d-wal1-$today.log perftest-13d-wal1-$today.log)| \
(cat - && uuencode perftest-13d-wal1-report.csv perftest-13d-wal1-report-$today.csv) | \
(cat - && uuencode perftest-13d-wal1-report.png perftest-13d-wal1-report-$today.png) | \
(cat - && uuencode perftest-var10k-int10s-wal1-$today.log perftest-var10k-int10s-wal1-$today.log)| \
(cat - && uuencode perftest-var10k-int10s-wal1-report.csv perftest-var10k-int10s-wal1-report-$today.csv) | \
(cat - && uuencode perftest-var10k-int10s-wal1-report.png perftest-var10k-int10s-wal1-report-$today.png) | \
(cat - && uuencode taosdemo-wal1-$today.log taosdemo-wal1-$today.log) | \
(cat - && uuencode taosdemo-wal1-report.csv taosdemo-wal1-report-$today.csv) | \
(cat - && uuencode taosdemo-rps-wal1-report.csv taosdemo-rps-wal1-report-$today.csv) | \
(cat - && uuencode taosdemo-wal1-report.png taosdemo-wal1-report-$today.png) | \
(cat - && uuencode taosdemo-rps-wal1-report.csv taosdemo-rps-wal1-report-$today.csv) | \
(cat - && uuencode taosdemo-rps-wal1-report.png taosdemo-rps-wal1-report-$today.png) | \
(cat - && uuencode perftest-1d-wal2-$today.log perftest-1d-wal2-$today.log)| \
(cat - && uuencode perftest-1d-wal2-report.csv perftest-1d-wal2-report-$today.csv) | \
(cat - && uuencode perftest-1d-wal2-report.png perftest-1d-wal2-report-$today.png) | \
(cat - && uuencode perftest-13d-wal2-$today.log perftest-13d-wal2-$today.log)| \
(cat - && uuencode perftest-13d-wal2-report.csv perftest-13d-wal2-report-$today.csv) | \
(cat - && uuencode perftest-13d-wal2-report.png perftest-13d-wal2-report-$today.png) | \
(cat - && uuencode perftest-var10k-int10s-wal2-$today.log perftest-var10k-int10s-wal2-$today.log)| \
(cat - && uuencode perftest-var10k-int10s-wal2-report.csv perftest-var10k-int10s-wal2-report-$today.csv) | \
(cat - && uuencode perftest-var10k-int10s-wal2-report.png perftest-var10k-int10s-wal2-report-$today.png) | \
(cat - && uuencode taosdemo-wal2-$today.log taosdemo-wal2-$today.log) | \
(cat - && uuencode taosdemo-wal2-report.csv taosdemo-wal2-report-$today.csv) | \
(cat - && uuencode taosdemo-wal2-report.png taosdemo-wal2-report-$today.png) | \
(cat - && uuencode taosdemo-rps-wal2-report.csv taosdemo-rps-wal2-report-$today.csv) | \
(cat - && uuencode taosdemo-rps-wal2-report.png taosdemo-rps-wal2-report-$today.png) | \
(cat - && uuencode sysinfo.log sysinfo.txt) | \
(cat - && uuencode taos.cfg taos-cfg-$today.txt) | \
ssmtp "${receiver}"
}
today=`date +"%Y%m%d"`
cd /root
echo -e "cron-ran-at-${today}" >> cron.log
cd $WORK_DIR
echo -e "cron-ran-at-${today}" >> $WORK_DIR/cron.log
echoInfo "Build TDengine"
buildTDengine
############################
setMaxConnections 100
setMaxConnections 1000
setMaxTablesPerVnode 6000
setDebugFlag 131
setQDebugFlag 131
############################
setWal "2"
cd /root
./perftest-tsdb-compare-1d.sh "wal2"
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-taosdemo.sh "wal2"
date >> $WORK_DIR/cron.log
cd /root
./perftest-tsdb-compare-13d.sh "wal2"
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-1d.sh
date >> $WORK_DIR/cron.log
cd /root
./perftest-taosdemo.sh "wal2"
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-13d.sh
date >> $WORK_DIR/cron.log
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-var10k-int10s.sh
date >> $WORK_DIR/cron.log
#############################
setWal "1"
cd /root
./perftest-tsdb-compare-1d.sh "wal1"
cd /root
./perftest-tsdb-compare-13d.sh "wal1"
cd /root
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-taosdemo.sh "wal1"
date >> $WORK_DIR/cron.log
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-1d.sh
date >> $WORK_DIR/cron.log
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-13d.sh
date >> $WORK_DIR/cron.log
cd $WORK_DIR
date >> $WORK_DIR/cron.log
./perftest-tsdb-compare-var10k-int10s.sh
date >> $WORK_DIR/cron.log
#############################
collectSysInfo
......
#!/bin/bash
WORK_DIR=/mnt/root
walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`
# Coloured Echoes
function red_echo { echo -e "\033[31m$@\033[0m"; }
function green_echo { echo -e "\033[32m$@\033[0m"; }
......@@ -17,13 +32,20 @@ function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; }
function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
rm -rf $logDir/*
rm -rf $dataDir/*
echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}
......@@ -32,7 +54,7 @@ function runCreateTableOnly {
echoInfo "Restart Taosd"
restartTaosd
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -n 0 2>&1 | tee taosdemo-$1-$today.log"
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -n 0 2>&1 | tee taosdemo-$walPostfix-$today.log"
demoCreateTableOnly=`grep "Total:" totaltime.out|awk '{print $2}'`
}
......@@ -40,7 +62,7 @@ function runDeleteTableOnly {
echoInfo "Restart Taosd"
restartTaosd
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -t 0 -D 1 2>&1 | tee taosdemo-$1-$today.log"
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo -t 0 -D 1 2>&1 | tee taosdemo-$walPostfix-$today.log"
demoDeleteTableOnly=`grep "Total:" totaltime.out|awk '{print $2}'`
}
......@@ -48,41 +70,44 @@ function runCreateTableThenInsert {
echoInfo "Restart Taosd"
restartTaosd
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo 2>&1 | tee -a taosdemo-$1-$today.log"
/usr/bin/time -f "Total: %e" -o totaltime.out bash -c "yes | taosdemo 2>&1 | tee -a taosdemo-$walPostfix-$today.log"
demoTableAndInsert=`grep "Total:" totaltime.out|awk '{print $2}'`
demoRPS=`grep "records\/second" taosdemo-$1-$today.log | tail -n1 | awk '{print $13}'`
demoRPS=`grep "records\/second" taosdemo-$walPostfix-$today.log | tail -n1 | awk '{print $13}'`
}
function generateTaosdemoPlot {
echo "${today} $1, demoCreateTableOnly: ${demoCreateTableOnly}, demoDeleteTableOnly: ${demoDeleteTableOnly}, demoTableAndInsert: ${demoTableAndInsert}" | tee -a taosdemo-$today.log
echo "${today}, ${demoCreateTableOnly}, ${demoDeleteTableOnly}, ${demoTableAndInsert}">> taosdemo-$1-report.csv
echo "${today}, ${demoRPS}" >> taosdemo-rps-$1-report.csv
echo "${today} $walPostfix, demoCreateTableOnly: ${demoCreateTableOnly}, demoDeleteTableOnly: ${demoDeleteTableOnly}, demoTableAndInsert: ${demoTableAndInsert}" | tee -a taosdemo-$today.log
echo "${today}, ${demoCreateTableOnly}, ${demoDeleteTableOnly}, ${demoTableAndInsert}">> taosdemo-$walPostfix-report.csv
echo "${today}, ${demoRPS}" >> taosdemo-rps-$walPostfix-report.csv
csvLines=`cat taosdemo-$1-report.csv | wc -l`
csvLines=`cat taosdemo-$walPostfix-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' taosdemo-$1-report.csv
sed -i '1d' taosdemo-$walPostfix-report.csv
fi
csvLines=`cat taosdemo-rps-$1-report.csv | wc -l`
csvLines=`cat taosdemo-rps-$walPostfix-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' taosdemo-rps-$1-report.csv
sed -i '1d' taosdemo-rps-$walPostfix-report.csv
fi
gnuplot -e "filename='taosdemo-$1-report'" -p taosdemo-csv2png.gnuplot
gnuplot -e "filename='taosdemo-rps-$1-report'" -p taosdemo-rps-csv2png.gnuplot
gnuplot -e "filename='taosdemo-$walPostfix-report'" -p taosdemo-csv2png.gnuplot
gnuplot -e "filename='taosdemo-rps-$walPostfix-report'" -p taosdemo-rps-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
cd $WORK_DIR
echoInfo "Test Create Table Only "
runCreateTableOnly $1
runCreateTableOnly
echoInfo "Test Create Table then Insert data"
runDeleteTableOnly $1
runDeleteTableOnly
echoInfo "Test Create Table then Insert data"
runCreateTableThenInsert $1
runCreateTableThenInsert
echoInfo "Generate plot for taosdemo"
generateTaosdemoPlot $1
echoInfo "End of TaosDemo Test"
generateTaosdemoPlot
tar czf $WORK_DIR/taos-log-taosdemo-$today.tar.gz $logDir/*
echoInfo "End of TaosDemo Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare
walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
......@@ -17,13 +33,20 @@ function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
rm -rf $logDir/*
rm -rf $dataDir/*
echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}
......@@ -32,27 +55,30 @@ function runPerfTest13d {
echoInfo "Restart Taosd"
restartTaosd
cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-13d-csv.sh $1 2>&1 | tee /root/perftest-13d-$1-$today.log
cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -d 13 -w -q 2>&1 | tee $WORK_DIR/perftest-13d-$walPostfix-$today.log
}
function generatePerfPlot13d {
cd /root
cd $WORK_DIR
csvLines=`cat perftest-13d-$1-report.csv | wc -l`
csvLines=`cat perftest-13d-$walPostfix-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-13d-$1-report.csv
sed -i '1d' perftest-13d-$walPostfix-report.csv
fi
gnuplot -e "filename='perftest-13d-$1-report'" -p perftest-csv2png.gnuplot
gnuplot -e "filename='perftest-13d-$walPostfix-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
cd $WORK_DIR
echoInfo "run Performance Test with 13 days data"
runPerfTest13d $1
echoInfo "Generate plot of 13 days data"
generatePerfPlot13d $1
echoInfo "End of TSDB-Compare 13-days-data Test"
tar czf $WORK_DIR/taos-log-13d-$today.tar.gz $logDir/*
echoInfo "End of TSDB-Compare 13-days-data Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare
walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
......@@ -17,13 +33,20 @@ function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
pkill -KILL -x taosd
sleep 10
rm -rf /mnt/var/log/taos/*
rm -rf /mnt/var/lib/taos/*
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
rm -rf $logDir/*
rm -rf $dataDir/*
echo "Start taosd"
taosd 2>&1 > /dev/null &
sleep 10
}
......@@ -32,27 +55,30 @@ function runPerfTest1d {
echoInfo "Restart Taosd"
restartTaosd
cd /home/taos/tliu/timeseriesdatabase-comparisons/build/tsdbcompare
./runreal-1d-csv.sh $1 2>&1 | tee /root/perftest-1d-$1-$today.log
cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -d 1 -w -q 2>&1 | tee $WORK_DIR/perftest-1d-$walPostfix-$today.log
}
function generatePerfPlot1d {
cd /root
cd $WORK_DIR
csvLines=`cat perftest-1d-$1-report.csv | wc -l`
csvLines=`cat perftest-1d-$walPostfix-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '2d' perftest-1d-$1-report.csv
sed -i '1d' perftest-1d-$walPostfix-report.csv
fi
gnuplot -e "filename='perftest-1d-$1-report'" -p perftest-csv2png.gnuplot
gnuplot -e "filename='perftest-1d-$walPostfix-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd /root
cd $WORK_DIR
echoInfo "run Performance Test with 1 day data"
runPerfTest1d $1
runPerfTest1d
echoInfo "Generate plot of 1 day data"
generatePerfPlot1d $1
echoInfo "End of TSDB-Compare 1-day-data Test"
generatePerfPlot1d
tar czf $WORK_DIR/taos-log-1d-$today.tar.gz $logDir/*
echoInfo "End of TSDB-Compare 1-day-data Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare
walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
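# systemctl may return before the process exits; keep sending SIGTERM until it is gone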
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
rm -rf $logDir/*
rm -rf $dataDir/*
echo "Start taosd"
taosd > /dev/null 2>&1 &
sleep 10
}
function runPerfTestVar10K {
echoInfo "Restart Taosd"
restartTaosd
cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -v 10000 -i 100 -w -q 2>&1 | tee $WORK_DIR/perftest-var10k-int100s-$walPostfix-$today.log
}
function generatePerfPlotVar10K {
cd $WORK_DIR
csvLines=`cat perftest-var10k-int100s-$walPostfix-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-var10k-int100s-$walPostfix-report.csv
fi
gnuplot -e "filename='perftest-var10k-int100s-$walPostfix-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd $WORK_DIR
echoInfo "run Performance Test with 10K tables data"
runPerfTestVar10K
echoInfo "Generate plot of 10K tables data"
generatePerfPlotVar10K
tar czf $WORK_DIR/taos-log-var10k-int100s-$today.tar.gz $logDir/*
echoInfo "End of TSDB-Compare var10k-int100s-tables-data Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
WORK_DIR=/mnt/root
TSDB_CMP_DIR=timeseriesdatabase-comparisons/build/tsdbcompare
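# color codes used by the error message below (assumed missing here; same values as the other scripts)
RED='\033[0;31m'
NC='\033[0m'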
walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi
logDir=`grep "^logDir" /etc/taos/taos.cfg | awk '{print $2}'`
dataDir=`grep "^dataDir" /etc/taos/taos.cfg | awk '{print $2}'`
# Coloured Echoes #
function red_echo { echo -e "\033[31m$@\033[0m"; } #
function green_echo { echo -e "\033[32m$@\033[0m"; } #
function yellow_echo { echo -e "\033[33m$@\033[0m"; } #
function white_echo { echo -e "\033[1;37m$@\033[0m"; } #
# Coloured Printfs #
function red_printf { printf "\033[31m$@\033[0m"; } #
function green_printf { printf "\033[32m$@\033[0m"; } #
function yellow_printf { printf "\033[33m$@\033[0m"; } #
function white_printf { printf "\033[1;37m$@\033[0m"; } #
# Debugging Outputs #
function white_brackets { local args="$@"; white_printf "["; printf "${args}"; white_printf "]"; } #
function echoInfo { local args="$@"; white_brackets $(green_printf "INFO") && echo " ${args}"; } #
function echoWarn { local args="$@"; echo "$(white_brackets "$(yellow_printf "WARN")" && echo " ${args}";)" 1>&2; } #
function echoError { local args="$@"; echo "$(white_brackets "$(red_printf "ERROR")" && echo " ${args}";)" 1>&2; } #
function restartTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
rm -rf $logDir/*
rm -rf $dataDir/*
echo "Start taosd"
taosd > /dev/null 2>&1 &
sleep 10
}
function runPerfTestVar10K {
echoInfo "Restart Taosd"
restartTaosd
cd $WORK_DIR/$TSDB_CMP_DIR
./runTDengine.sh -v 10000 -w -q 2>&1 | tee $WORK_DIR/perftest-var10k-int10s-$walPostfix-$today.log
}
function generatePerfPlotVar10K {
cd $WORK_DIR
csvLines=`cat perftest-var10k-int10s-$walPostfix-report.csv | wc -l`
if [ "$csvLines" -gt "10" ]; then
sed -i '1d' perftest-var10k-int10s-$walPostfix-report.csv
fi
gnuplot -e "filename='perftest-var10k-int10s-$walPostfix-report'" -p perftest-csv2png.gnuplot
}
today=`date +"%Y%m%d"`
cd $WORK_DIR
echoInfo "run Performance Test with 10K tables data"
runPerfTestVar10K
echoInfo "Generate plot of 10K tables data"
generatePerfPlotVar10K
tar czf $WORK_DIR/taos-log-var10k-int10s-$today.tar.gz $logDir/*
echoInfo "End of TSDB-Compare var10k-int10s-tables-data Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
#set -x
WORK_DIR=/mnt/root
DATA_DIR=/mnt/data
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
# default value
DEFAULT_BATCH=5000
DEFAULT_DAYS=1
DEFAULT_INTERVAL=1
DEFAULT_SCALEVAR=10
DEFAULT_DOPREPARE=false
DEFAULT_DOWRITE=false
DEFAULT_DOQUERY=false
# function
function do_prepare {
echo
echo "---------------Generating Data-----------------"
echo
echo
echo "Prepare data for InfluxDB...."
echo "bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval $interval_s \
-scale-var $scalevar -use-case devops -timestamp-start $TIME_START \
-timestamp-end $TIME_END > $DATA_FILE"
bin/bulk_data_gen -seed 123 -format influx-bulk -sampling-interval $interval_s \
-scale-var $scalevar -use-case devops -timestamp-start $TIME_START \
-timestamp-end $TIME_END > $DATA_FILE
}
function do_write {
echo "cat $DATA_FILE | bin/bulk_load_influx \
--batch-size=$batch --workers=20 --urls=http://172.15.1.5:8086 | grep loaded"
INFLUXRES=`cat $DATA_FILE | bin/bulk_load_influx \
--batch-size=$batch --workers=20 --urls="http://172.15.1.5:8086" | grep loaded`
echo -e "${GREEN}InfluxDB writing result:${NC}"
echo -e "${GREEN}$INFLUXRES${NC}"
DATA=`echo $INFLUXRES|awk '{print($2)}'`
TMP=`echo $INFLUXRES|awk '{print($5)}'`
IFWTM=`echo ${TMP%s*}`
}
function do_query {
echo
echo "------------------Querying Data-----------------"
echo
echo
echo "start query test, query max from 8 hosts group by 1 hour, InfluxDB"
echo
#Test case 1
# Test case 1: across all data, match 8 hostname tags and query the overall maximum of the usage_user metric from the simulated server CPU data for those hosts.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
echo "IFQS1=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall"
IFQS1=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-all -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 1 result:${NC}"
echo -e "${GREEN}$IFQS1${NC}"
TMP=`echo $IFQS1|awk '{print($4)}'`
IFQ1=`echo ${TMP%s*}`
#Test case 2
# Test case 2: across all data, match 8 hostname tags and query the maximum of usage_user for those hosts at a 1-hour granularity.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
echo "IFQS2=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls=http://172.15.1.5:8086 -workers 50 -print-interval 0|grep wall"
IFQS2=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-allbyhr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb \
-urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 2 result:${NC}"
echo -e "${GREEN}$IFQS2${NC}"
TMP=`echo $IFQS2|awk '{print($4)}'`
IFQ2=`echo ${TMP%s*}`
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user for those hosts at a 10-minute granularity.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
echo "IFQS3=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls=http://172.15.1.5:8086 -workers 50 -print-interval 0|grep wall"
IFQS3=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-12-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb \
-urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 3 result:${NC}"
echo -e "${GREEN}$IFQS3${NC}"
TMP=`echo $IFQS3|awk '{print($4)}'`
IFQ3=`echo ${TMP%s*}`
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user for those hosts at a 1-minute granularity.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
echo "IFQS4=bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr -scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb -urls=http://172.15.1.5:8086 -workers 50 -print-interval 0|grep wall"
IFQS4=`bin/bulk_query_gen -seed 123 -format influx-http -query-type 8-host-1-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_influxdb \
-urls="http://172.15.1.5:8086" -workers 50 -print-interval 0|grep wall`
echo -e "${GREEN}InfluxDB query test case 4 result:${NC}"
echo -e "${GREEN}$IFQS4${NC}"
TMP=`echo $IFQS4|awk '{print($4)}'`
IFQ4=`echo ${TMP%s*}`
}
batch=$DEFAULT_BATCH
days=$DEFAULT_DAYS
interval=$DEFAULT_INTERVAL
scalevar=$DEFAULT_SCALEVAR
doprepare=$DEFAULT_DOPREPARE
dowrite=$DEFAULT_DOWRITE
doquery=$DEFAULT_DOQUERY
help="$(basename "$0") [-h] [-b batchsize] [-d data-of-days] [-i interval] [-v scalevar] [-p false for don't prepare] [-w false for don't do write] [-q false for don't do query]"
while getopts ':b:d:i:v:pwqh' flag; do
case "${flag}" in
b) batch=${OPTARG};;
d) days=${OPTARG};;
i) interval=${OPTARG};;
v) scalevar=${OPTARG};;
p) doprepare=true;;
w) dowrite=true;;
q) doquery=true;;
:) echo -e "${RED}Missing argument!${NC}"
echo "$help"
exit 1
;;
h) echo "$help"
exit 1
;;
esac
done
if [[ $scalevar -eq 10000 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"
if [[ $interval -eq 100 ]]
then
interval_s=100s
DATA_FILE=$DATA_DIR/influxdb-var10k-int100s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-var10k-int100s.csv
else
interval_s=10s
DATA_FILE=$DATA_DIR/influxdb-var10k-int10s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-var10k-int10s.csv
fi
else
if [[ $days -eq 1 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"
DATA_FILE=$DATA_DIR/influxdb-1d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-1d.csv
elif [[ $days -eq 13 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-14T00:00:00Z"
DATA_FILE=$DATA_DIR/influxdb-13d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-influxdb-report-13d.csv
else
echo -e "{RED} don't support input $days{NC}"
exit 1
fi
interval_s=${interval}s
fi
echo "TIME_START: $TIME_START, TIME_END: $TIME_END, DATA_FILE: $DATA_FILE"
echo "doprepare: $doprepare, dowrite: $dowrite, doquery: $doquery"
if $doprepare;
then
do_prepare
fi
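# run InfluxDB on a dedicated docker network at the fixed IP (172.15.1.5) the benchmark tools point at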
docker network create --ip-range 172.15.1.255/24 --subnet 172.15.1.1/16 tsdbcomp >>/dev/null 2>&1
INFLUX=`docker run -d -p 8086:8086 --net tsdbcomp --ip 172.15.1.5 influxdb` >>/dev/null 2>&1
sleep 10
if $dowrite;
then
echo -e "Start test InfluxDB writting, result in ${GREEN}Green line${NC}"
do_write
fi
if $doquery;
then
echo -e "Start test InfluxDB query, result in ${GREEN}Green line${NC}"
do_query
fi
echo
echo
echo "======================================================"
echo " tsdb performance comparision "
echo "======================================================"
if $dowrite;
then
echo -e " Writing $DATA records test takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFWTM
echo "------------------------------------------------------"
fi
if $doquery;
then
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " InfluxDB | %-4.5f Seconds \n" $IFQ4
echo "------------------------------------------------------"
fi
docker stop $INFLUX >>/dev/null 2>&1
docker container rm -f $INFLUX >>/dev/null 2>&1
docker network rm tsdbcomp >>/dev/null 2>&1
today=`date +"%Y%m%d"`
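# append one summary row per run: date, write time, query times for cases 1-4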
echo "${today}, ${IFWTM}, ${IFQ1}, ${IFQ2}, ${IFQ3}, ${IFQ4}" >> $RECORD_CSV_FILE
#!/bin/bash
#set -x
WORK_DIR=/mnt/root
DATA_DIR=/mnt/data
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
# default value
DEFAULT_BATCH=5000
DEFAULT_DAYS=1
DEFAULT_INTERVAL=1
DEFAULT_SCALEVAR=10
DEFAULT_DOPREPARE=false
DEFAULT_DOWRITE=false
DEFAULT_DOQUERY=false
# function
function do_prepare {
echo
echo "---------------Generating Data-----------------"
echo
echo
echo "Prepare data for TDengine...."
# bin/bulk_data_gen -seed 123 -format tdengine -tdschema-file config/TDengineSchema.toml -scale-var 100 -use-case devops -timestamp-start "2018-01-01T00:00:00Z" -timestamp-end "2018-01-02T00:00:00Z" > $DATA_DIR/tdengine.dat
echo "bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval $interval_s \
-tdschema-file config/TDengineSchema.toml -scale-var $scalevar \
-use-case devops -timestamp-start $TIME_START \
-timestamp-end $TIME_END \
> $DATA_FILE"
bin/bulk_data_gen -seed 123 -format tdengine -sampling-interval $interval_s \
-tdschema-file config/TDengineSchema.toml -scale-var $scalevar \
-use-case devops -timestamp-start $TIME_START \
-timestamp-end $TIME_END \
> $DATA_FILE
}
function do_write {
echo "TDENGINERES=cat $DATA_FILE |bin/bulk_load_tdengine --url 127.0.0.1:0 \
--batch-size $batch -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded"
TDENGINERES=`cat $DATA_FILE |bin/bulk_load_tdengine --url 127.0.0.1:0 \
--batch-size $batch -do-load -report-tags n1 -workers 20 -fileout=false| grep loaded`
echo
echo -e "${GREEN}TDengine writing result:${NC}"
echo -e "${GREEN}$TDENGINERES${NC}"
DATA=`echo $TDENGINERES|awk '{print($2)}'`
TMP=`echo $TDENGINERES|awk '{print($5)}'`
TDWTM=`echo ${TMP%s*}`
}
function do_query {
echo
echo "------------------Querying Data-----------------"
echo
echo
echo "start query test, query max from 8 hosts group by 1 hour, TDengine"
echo
#Test case 1
# Test case 1: across all data, match 8 hostname tags and query the overall maximum of the usage_user metric from the simulated server CPU data for those hosts.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') ;
# a,b,c,d,e,f,g,h are random 8 numbers.
echo "TDQS1=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"
TDQS1=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-all \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 1 result:${NC}"
echo -e "${GREEN}$TDQS1${NC}"
TMP=`echo $TDQS1|awk '{print($4)}'`
TDQ1=`echo ${TMP%s*}`
#Test case 2
# Test case 2: across all data, match 8 hostname tags and query the maximum of usage_user for those hosts at a 1-hour granularity.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') interval(1h);
# a,b,c,d,e,f,g,h are random 8 numbers
echo "TDQS2=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"
TDQS2=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-allbyhr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 2 result:${NC}"
echo -e "${GREEN}$TDQS2${NC}"
TMP=`echo $TDQS2|awk '{print($4)}'`
TDQ2=`echo ${TMP%s*}`
#Test case 3
# Test case 3: over a random 12-hour window, match 8 hostname tags and query the maximum of usage_user for those hosts at a 10-minute granularity.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =12 hour
echo "TDQS3=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"
TDQS3=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-12-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 3 result:${NC}"
echo -e "${GREEN}$TDQS3${NC}"
TMP=`echo $TDQS3|awk '{print($4)}'`
TDQ3=`echo ${TMP%s*}`
#Test case 4
# Test case 4: over a random 1-hour window, match 8 hostname tags and query the maximum of usage_user for those hosts at a 1-minute granularity.
#select max(usage_user) from cpu where(hostname='host_a' and hostname='host_b'and hostname='host_c'and hostname='host_d'and hostname='host_e'and hostname='host_f' and hostname='host_g'and hostname='host_h') and time >x and time <y interval(10m);
# a,b,c,d,e,f,g,h are random 8 numbers, y-x =1 hours
echo "TDQS4=bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls=http://127.0.0.1:6020 -workers 50 -print-interval 0|grep wall"
TDQS4=`bin/bulk_query_gen -seed 123 -format tdengine -query-type 8-host-1-hr \
-scale-var $scalevar -queries 1000 | bin/query_benchmarker_tdengine \
-urls="http://127.0.0.1:6020" -workers 50 -print-interval 0|grep wall`
echo
echo -e "${GREEN}TDengine query test case 4 result:${NC}"
echo -e "${GREEN}$TDQS4${NC}"
TMP=`echo $TDQS4|awk '{print($4)}'`
TDQ4=`echo ${TMP%s*}`
}
batch=$DEFAULT_BATCH
days=$DEFAULT_DAYS
interval=$DEFAULT_INTERVAL
scalevar=$DEFAULT_SCALEVAR
doprepare=$DEFAULT_DOPREPARE
dowrite=$DEFAULT_DOWRITE
doquery=$DEFAULT_DOQUERY
help="$(basename "$0") [-h] [-b batchsize] [-d data-of-days] [-i interval] [-v scalevar] [-p false for don't prepare] [-w false for don't do write] [-q false for don't do query]"
while getopts ':b:d:i:v:pwqh' flag; do
case "${flag}" in
b) batch=${OPTARG};;
d) days=${OPTARG};;
i) interval=${OPTARG};;
v) scalevar=${OPTARG};;
p) doprepare=true;;
w) dowrite=true;;
q) doquery=true;;
:) echo -e "${RED}Missing argument!${NC}"
echo "$help"
exit 1
;;
h) echo "$help"
exit 1
;;
esac
done
walLevel=`grep "^walLevel" /etc/taos/taos.cfg | awk '{print $2}'`
if [[ "$walLevel" -eq "2" ]]; then
walPostfix="wal2"
elif [[ "$walLevel" -eq "1" ]]; then
walPostfix="wal1"
else
echo -e "${RED}wrong walLevel $walLevel found! ${NC}"
exit 1
fi
echo -e "${GREEN} $walPostfix found! ${NC}"
if [[ $scalevar -eq 10000 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"
if [[ $interval -eq 100 ]]
then
interval_s=100s
DATA_FILE=$DATA_DIR/tdengine-var10k-int100s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-var10k-int100s-$walPostfix-report.csv
else
interval_s=10s
DATA_FILE=$DATA_DIR/tdengine-var10k-int10s.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-var10k-int10s-$walPostfix-report.csv
fi
else
if [[ $days -eq 1 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-02T00:00:00Z"
DATA_FILE=$DATA_DIR/tdengine-1d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-1d-$walPostfix-report.csv
elif [[ $days -eq 13 ]]
then
TIME_START="2018-01-01T00:00:00Z"
TIME_END="2018-01-14T00:00:00Z"
DATA_FILE=$DATA_DIR/tdengine-13d.dat
RECORD_CSV_FILE=$WORK_DIR/perftest-13d-$walPostfix-report.csv
else
echo -e "{RED} don't support input $days{NC}"
exit 1
fi
interval_s=${interval}s
fi
echo "TIME_START: $TIME_START, TIME_END: $TIME_END, DATA_FILE: $DATA_FILE"
echo "doprepare: $doprepare, dowrite: $dowrite, doquery: $doquery"
if $doprepare;
then
do_prepare
fi
echo
if $dowrite;
then
echo -e "Start test TDengine writting, result in ${GREEN}Green line${NC}"
do_write
fi
if $doquery;
then
echo -e "Start test TDengine query, result in ${GREEN}Green line${NC}"
do_query
fi
echo
echo
echo "======================================================"
echo " tsdb performance comparision "
echo "======================================================"
if $dowrite;
then
echo -e " Writing $DATA records test takes: "
printf " TDengine | %-4.5f Seconds \n" $TDWTM
echo "------------------------------------------------------"
fi
if $doquery;
then
echo " Query test cases: "
echo " case 1: select the max(value) from all data "
echo " filtered out 8 hosts "
echo " Query test case 1 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ1
echo "------------------------------------------------------"
echo " case 2: select the max(value) from all data "
echo " filtered out 8 hosts with an interval of 1 hour "
echo " case 2 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ2
echo "------------------------------------------------------"
echo " case 3: select the max(value) from random 12 hours"
echo " data filtered out 8 hosts with an interval of 10 min "
echo " filtered out 8 hosts interval(1h) "
echo " case 3 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ3
echo "------------------------------------------------------"
echo " case 4: select the max(value) from random 1 hour data "
echo " data filtered out 8 hosts with an interval of 1 min "
echo " case 4 takes: "
printf " TDengine | %-4.5f Seconds \n" $TDQ4
echo "------------------------------------------------------"
fi
#bulk_query_gen/bulk_query_gen -format influx-http -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_influxdb/query_benchmarker_influxdb -urls="http://172.26.89.231:8086"
#bulk_query_gen/bulk_query_gen -format tdengine -query-type 1-host-1-hr -scale-var 10 -queries 1000 | query_benchmarker_tdengine/query_benchmarker_tdengine -urls="http://172.26.89.231:6020"
today=`date +"%Y%m%d"`
echo "${today}, ${TDWTM}, ${TDQ1}, ${TDQ2}, ${TDQ3}, ${TDQ4}" >> $RECORD_CSV_FILE
#!/usr/bin/gnuplot
reset
set terminal png
set title filename font ",20"
set ylabel "Time in Seconds"
set xdata time
set timefmt "%Y%m%d"
set format x "%Y-%m-%d"
set xlabel "Date"
set style data linespoints
set terminal pngcairo size 1024,768 enhanced font 'Segoe UI, 10'
set output filename . '.png'
set datafile separator ','
set key reverse Left outside
set grid
plot filename . '.csv' using 1:2 title "Request Per Second"
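# example invocation (as in the scripts above): gnuplot -e "filename='perftest-1d-wal2-report'" -p perftest-csv2png.gnuplot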
@@ -112,8 +112,7 @@ class TDTestCase:
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.id, stb_p.dscrption, stb_p.pressure from stb_p, stb_t where stb_p.ts=stb_t.ts and stb_p.id = stb_t.id")
tdSql.checkRows(6)
tdSql.query("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.pid, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
tdSql.checkRows(2)
tdSql.error("select stb_t.ts, stb_t.dscrption, stb_t.temperature, stb_t.pid, stb_p.id, stb_p.dscrption, stb_p.pressure,stb_v.velocity from stb_p, stb_t, stb_v where stb_p.ts=stb_t.ts and stb_p.ts=stb_v.ts and stb_p.id = stb_t.id")
def stop(self):
tdSql.close()
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
import threading
import time
from datetime import datetime
class MetadataQuery:
    def initConnection(self):
        # connection settings and test scale: 100000 tables x 10 rows, written by 10 threads
        self.tables = 100000
        self.records = 10
        self.numOfThreads = 10
        self.ts = 1537146000000
        self.host = "127.0.0.1"
        self.user = "root"
        self.password = "taosdata"
        self.config = "/etc/taos"

    def connectDB(self):
        self.conn = taos.connect(
            self.host,
            self.user,
            self.password,
            self.config)
        return self.conn.cursor()
    def createStable(self):
        print("================= Create stable meters =================")
        cursor = self.connectDB()
        cursor.execute("drop database if exists test")
        cursor.execute("create database test")
        cursor.execute("use test")
        cursor.execute('''create table if not exists meters (ts timestamp, speed int) tags(
            tgcol1 tinyint, tgcol2 smallint, tgcol3 int, tgcol4 bigint, tgcol5 float, tgcol6 double, tgcol7 bool, tgcol8 binary(20), tgcol9 nchar(20),
            tgcol10 tinyint, tgcol11 smallint, tgcol12 int, tgcol13 bigint, tgcol14 float, tgcol15 double, tgcol16 bool, tgcol17 binary(20), tgcol18 nchar(20),
            tgcol19 tinyint, tgcol20 smallint, tgcol21 int, tgcol22 bigint, tgcol23 float, tgcol24 double, tgcol25 bool, tgcol26 binary(20), tgcol27 nchar(20),
            tgcol28 tinyint, tgcol29 smallint, tgcol30 int, tgcol31 bigint, tgcol32 float, tgcol33 double, tgcol34 bool, tgcol35 binary(20), tgcol36 nchar(20),
            tgcol37 tinyint, tgcol38 smallint, tgcol39 int, tgcol40 bigint, tgcol41 float, tgcol42 double, tgcol43 bool, tgcol44 binary(20), tgcol45 nchar(20),
            tgcol46 tinyint, tgcol47 smallint, tgcol48 int, tgcol49 bigint, tgcol50 float, tgcol51 double, tgcol52 bool, tgcol53 binary(20), tgcol54 nchar(20))''')
        cursor.close()
        self.conn.close()
    def createTablesAndInsertData(self, threadID):
        cursor = self.connectDB()
        cursor.execute("use test")

        # each thread creates and fills its own contiguous slice of the tables
        tablesPerThread = int(self.tables / self.numOfThreads)
        base = threadID * tablesPerThread
        for i in range(tablesPerThread):
            cursor.execute(
                '''create table t%d using meters tags(
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d',
                %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d')''' %
                (base + i + 1,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100,
                 (base + i) %100, (base + i) %10000, (base + i) %1000000, (base + i) %100000000, (base + i) %100 * 1.1, (base + i) %100 * 2.3, (base + i) %2, (base + i) %100, (base + i) %100))
            for j in range(self.records):
                cursor.execute(
                    "insert into t%d values(%d, %d)" %
                    (base + i + 1, self.ts + j, j))
        cursor.close()
        self.conn.close()
    def queryData(self, query):
        cursor = self.connectDB()
        cursor.execute("use test")

        print("================= query tag data =================")
        startTime = datetime.now()
        cursor.execute(query)
        cursor.fetchall()
        endTime = datetime.now()
        print(
            "Query time for the above query is %d seconds" %
            (endTime - startTime).seconds)
        cursor.close()
        self.conn.close()
if __name__ == '__main__':
    t = MetadataQuery()
    t.initConnection()
    t.createStable()

    print(
        "================= Create %d tables and insert %d records into each table =================" %
        (t.tables, t.records))
    startTime = datetime.now()
    # start every worker first, then join them all, so the inserts actually run in parallel
    threads = []
    for i in range(t.numOfThreads):
        thread = threading.Thread(
            target=t.createTablesAndInsertData, args=(i,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
    endTime = datetime.now()
    diff = (endTime - startTime).seconds
    print(
        "spend %d seconds to create %d tables and insert %d records into each table" %
        (diff, t.tables, t.records))

    query = '''select tgcol1, tgcol2, tgcol3, tgcol4, tgcol5, tgcol6, tgcol7, tgcol8, tgcol9,
        tgcol10, tgcol11, tgcol12, tgcol13, tgcol14, tgcol15, tgcol16, tgcol17, tgcol18,
        tgcol19, tgcol20, tgcol21, tgcol22, tgcol23, tgcol24, tgcol25, tgcol26, tgcol27,
        tgcol28, tgcol29, tgcol30, tgcol31, tgcol32, tgcol33, tgcol34, tgcol35, tgcol36,
        tgcol37, tgcol38, tgcol39, tgcol40, tgcol41, tgcol42, tgcol43, tgcol44, tgcol45,
        tgcol46, tgcol47, tgcol48, tgcol49, tgcol50, tgcol51, tgcol52, tgcol53, tgcol54
        from meters where tgcol1 > 10 AND tgcol1 < 100 and tgcol2 > 100 and tgcol2 < 1000 or tgcol3 > 10000 or tgcol7 = true
        or tgcol8 like '%2' and tgcol10 < 10'''
    t.queryData(query)