Commit dd640885, authored by dapan1121

Merge branch '3.0' into fix/TD-15197

......@@ -233,7 +233,6 @@ struct SVnodeCfg {
};
typedef struct {
TSKEY lastKey;
uint64_t uid;
uint64_t groupId;
} STableKeyInfo;
......
......@@ -270,7 +270,7 @@ int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) {
break;
}
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, uid = id};
STableKeyInfo info = {uid = id};
taosArrayPush(list, &info);
}
......
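Throughout this commit the .lastKey = TSKEY_INITIAL_VAL designators are dropped from STableKeyInfo initializers together with the field itself. A minimal standalone sketch of why omitting members from a C designated initializer is safe (hypothetical TKeyInfo stand-in, not the real struct): any member that is not named is zero-initialized.

#include <assert.h>
#include <stdint.h>

/* Stand-in for STableKeyInfo after this change (uid and groupId only). */
typedef struct {
  uint64_t uid;
  uint64_t groupId;
} TKeyInfo;

int main(void) {
  TKeyInfo info = {.uid = 42};  /* groupId is not named in the initializer */
  assert(info.groupId == 0);    /* unnamed members are zero-initialized */
  return 0;
}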
......@@ -13,7 +13,6 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ttime.h"
#include "function.h"
#include "functionMgt.h"
#include "index.h"
......@@ -21,6 +20,7 @@
#include "tdatablock.h"
#include "thash.h"
#include "tmsg.h"
#include "ttime.h"
#include "executil.h"
#include "executorimpl.h"
......@@ -72,7 +72,7 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo) {
assert(pGroupResInfo != NULL);
for(int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
for (int32_t i = 0; i < taosArrayGetSize(pGroupResInfo->pRows); ++i) {
SResKeyPos* pRes = taosArrayGetP(pGroupResInfo->pRows, i);
taosMemoryFree(pRes);
}
......@@ -266,17 +266,24 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
}
int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified) {
int32_t code = TSDB_CODE_SUCCESS;
SMetaReader mr = {0};
metaReaderInit(&mr, metaHandle, 0);
metaGetTableEntryByUid(&mr, info->uid);
code = metaGetTableEntryByUid(&mr, info->uid);
if (TSDB_CODE_SUCCESS != code) {
metaReaderClear(&mr);
return terrno;
}
SNode* pTagCondTmp = nodesCloneNode(pTagCond);
nodesRewriteExprPostOrder(&pTagCondTmp, doTranslateTagExpr, &mr);
metaReaderClear(&mr);
SNode* pNew = NULL;
int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew);
SNode* pNew = NULL;
code = scalarCalculateConstants(pTagCondTmp, &pNew);
if (TSDB_CODE_SUCCESS != code) {
terrno = code;
nodesDestroyNode(pTagCondTmp);
......@@ -295,7 +302,8 @@ int32_t isTableOk(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool*
return TSDB_CODE_SUCCESS;
}
int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond, STableListInfo* pListInfo) {
int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
STableListInfo* pListInfo) {
int32_t code = TSDB_CODE_SUCCESS;
pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
......@@ -317,14 +325,14 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
qError("failed to get tableIds from index, reason:%s, suid:%" PRIu64, tstrerror(code), tableUid);
// code = TSDB_CODE_INDEX_REBUILDING;
// code = TSDB_CODE_INDEX_REBUILDING;
code = vnodeGetAllTableList(pVnode, tableUid, pListInfo->pTableList);
} else {
qDebug("success to get tableIds, size:%d, suid:%" PRIu64, (int)taosArrayGetSize(res), tableUid);
}
for (int i = 0; i < taosArrayGetSize(res); i++) {
STableKeyInfo info = {.lastKey = TSKEY_INITIAL_VAL, .uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
taosArrayPush(pListInfo->pTableList, &info);
}
taosArrayDestroy(res);
......@@ -338,7 +346,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
return code;
}
} else { // Create one table group.
STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0};
STableKeyInfo info = {.uid = tableUid, .groupId = 0};
taosArrayPush(pListInfo->pTableList, &info);
}
......@@ -610,8 +618,7 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
for (int32_t i = 0; i < numOfOutput; ++i) {
const char* pName = pCtx[i].pExpr->pExpr->_function.functionName;
if ((strcmp(pName, "_select_value") == 0) ||
(strcmp(pName, "_group_key") == 0)) {
if ((strcmp(pName, "_select_value") == 0) || (strcmp(pName, "_group_key") == 0)) {
pValCtx[num++] = &pCtx[i];
} else if (fmIsSelectFunc(pCtx[i].functionId)) {
p = &pCtx[i];
......@@ -747,11 +754,11 @@ SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode) {
SColumn extractColumnFromColumnNode(SColumnNode* pColNode) {
SColumn c = {0};
c.slotId = pColNode->slotId;
c.colId = pColNode->colId;
c.type = pColNode->node.resType.type;
c.bytes = pColNode->node.resType.bytes;
c.scale = pColNode->node.resType.scale;
c.slotId = pColNode->slotId;
c.colId = pColNode->colId;
c.type = pColNode->node.resType.type;
c.bytes = pColNode->node.resType.bytes;
c.scale = pColNode->node.resType.scale;
c.precision = pColNode->node.resType.precision;
return c;
}
......@@ -768,10 +775,10 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
// pCond->twindow = pTableScanNode->scanRange;
// TODO: get it from stable scan node
pCond->twindows = pTableScanNode->scanRange;
pCond->suid = pTableScanNode->scan.suid;
pCond->type = BLOCK_LOAD_OFFSET_ORDER;
pCond->suid = pTableScanNode->scan.suid;
pCond->type = BLOCK_LOAD_OFFSET_ORDER;
pCond->startVersion = -1;
pCond->endVersion = -1;
pCond->endVersion = -1;
// pCond->type = pTableScanNode->scanFlag;
int32_t j = 0;
......@@ -850,11 +857,11 @@ static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) {
}
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order) {
int32_t factor = (order == TSDB_ORDER_ASC)? -1:1;
int32_t factor = (order == TSDB_ORDER_ASC) ? -1 : 1;
STimeWindow win = *pWindow;
STimeWindow save = win;
while(win.skey <= ts && win.ekey >= ts) {
while (win.skey <= ts && win.ekey >= ts) {
save = win;
win.skey = taosTimeAdd(win.skey, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
win.ekey = taosTimeAdd(win.ekey, factor * pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
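A standalone sketch of what getFirstQualifiedTimeWindow computes for ascending order (factor = -1): starting from a window that covers ts, it keeps sliding backwards and returns the last window that still covers ts, i.e. the earliest qualified window. The sketch assumes a fixed-size sliding step and plain integer arithmetic instead of the calendar-aware taosTimeAdd().

#include <assert.h>
#include <stdint.h>

typedef struct { int64_t skey, ekey; } TimeWin;

static TimeWin firstQualifiedWindow(int64_t ts, TimeWin win, int64_t sliding) {
  TimeWin save = win;
  while (win.skey <= ts && win.ekey >= ts) {
    save = win;           /* remember the last window that still covered ts */
    win.skey -= sliding;  /* factor is -1 for ascending order */
    win.ekey -= sliding;
  }
  return save;            /* the earliest window containing ts */
}

int main(void) {
  TimeWin w = {.skey = 100, .ekey = 109};          /* interval 10, sliding 5 */
  TimeWin first = firstQualifiedWindow(104, w, 5);
  assert(first.skey == 95 && first.ekey == 104);   /* [95,104] covers 104, [90,99] does not */
  return 0;
}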
......@@ -894,7 +901,6 @@ bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
pLimitInfo->slimit.offset != -1);
}
static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
......@@ -903,7 +909,7 @@ void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimit
SLimit slimit = {.limit = getLimit(pSLimit), .offset = getOffset(pSLimit)};
pLimitInfo->limit = limit;
pLimitInfo->slimit= slimit;
pLimitInfo->slimit = slimit;
pLimitInfo->remainOffset = limit.offset;
pLimitInfo->remainGroupOffset = slimit.offset;
}
\ No newline at end of file
}
......@@ -191,7 +191,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
SMetaReader mr = {0};
metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
for (int32_t i = 0; i < taosArrayGetSize(tableIdList); ++i) {
int64_t* id = (int64_t*)taosArrayGet(tableIdList, i);
uint64_t* id = (uint64_t*)taosArrayGet(tableIdList, i);
int32_t code = metaGetTableEntryByUid(&mr, *id);
if (code != TSDB_CODE_SUCCESS) {
......@@ -206,7 +206,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
if (pScanInfo->pTagCond != NULL) {
bool qualified = false;
STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid, .lastKey = 0};
STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid};
code = isTableOk(&info, pScanInfo->pTagCond, pScanInfo->readHandle.meta, &qualified);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to filter new table, uid:0x%" PRIx64 ", %s", info.uid, idstr);
......@@ -218,9 +218,7 @@ static SArray* filterQualifiedChildTables(const SStreamScanInfo* pScanInfo, cons
}
}
/*pScanInfo->pStreamScanOp->pTaskInfo->tableqinfoList.*/
// handle multiple partition
taosArrayPush(qa, id);
}
......@@ -244,6 +242,19 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
qDebug(" %d qualified child tables added into stream scanner", (int32_t)taosArrayGetSize(qa));
code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// add to qTaskInfo
// todo refactor STableList
for(int32_t i = 0; i < taosArrayGetSize(qa); ++i) {
uint64_t* uid = taosArrayGet(qa, i);
STableKeyInfo keyInfo = {.uid = *uid, .groupId = 0};
taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
}
taosArrayDestroy(qa);
} else { // remove the table id in current list
qDebug(" %d remove child tables from the stream scanner", (int32_t)taosArrayGetSize(tableIdList));
......
......@@ -3351,7 +3351,11 @@ static SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// filter shall be applied after apply functions and limit/offset on the result
doFilter(pProjectInfo->pFilterNode, pInfo->pRes);
if (status == PROJECT_RETRIEVE_CONTINUE) {
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
break;
}
if (status == PROJECT_RETRIEVE_CONTINUE || pInfo->pRes->info.rows == 0) {
continue;
} else if (status == PROJECT_RETRIEVE_DONE) {
break;
......@@ -3957,7 +3961,7 @@ static SSDataBlock* doApplyIndefinitFunction(SOperatorInfo* pOperator) {
doFilter(pIndefInfo->pCondition, pInfo->pRes);
size_t rows = pInfo->pRes->info.rows;
if (rows >= 0) {
if (rows > 0 || pOperator->status == OP_EXEC_DONE) {
break;
}
}
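The two loop-control fixes above follow the same rule: keep pulling from the upstream operator until filtering leaves at least one row or the operator is done (and, in stream mode, hand back the current block immediately). A minimal sketch of that pull-until-non-empty loop, with hypothetical fetch/filter stubs rather than the real operators:

#include <stdbool.h>
#include <stdio.h>

typedef struct { int rows; bool exhausted; } Block;

/* Hypothetical upstream: yields two blocks filtered down to 0 rows, then one with data. */
static Block fetchAndFilter(void) {
  static int call = 0;
  ++call;
  if (call <= 2) return (Block){.rows = 0, .exhausted = false};
  if (call == 3) return (Block){.rows = 8, .exhausted = false};
  return (Block){.rows = 0, .exhausted = true};
}

static int nextNonEmptyBlock(void) {
  for (;;) {
    Block b = fetchAndFilter();
    if (b.rows > 0 || b.exhausted) return b.rows;  /* stop on data or on done */
    /* otherwise the whole block was filtered out: pull the next one */
  }
}

int main(void) {
  printf("%d\n", nextNonEmptyBlock());  /* prints 8 */
  return 0;
}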
......@@ -4457,7 +4461,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
} else { // Create one table group.
STableKeyInfo info = {.lastKey = 0, .uid = pBlockNode->uid, .groupId = 0};
STableKeyInfo info = {.uid = pBlockNode->uid, .groupId = 0};
taosArrayPush(pTableListInfo->pTableList, &info);
}
......
......@@ -2467,9 +2467,7 @@ bool apercentileFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResult
int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
int32_t numOfElems = 0;
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SInputColumnInfoData* pInput = &pCtx->input;
// SColumnDataAgg* pAgg = pInput->pColumnDataAgg[0];
SColumnInfoData* pCol = pInput->pData[0];
int32_t type = pCol->info.type;
......@@ -2502,6 +2500,9 @@ int32_t apercentileFunction(SqlFunctionCtx* pCtx) {
GET_TYPED_DATA(v, double, type, data);
tHistogramAdd(&pInfo->pHisto, v);
}
qDebug("add %d elements into histogram, total:%d, numOfEntry:%d, %p", numOfElems, pInfo->pHisto->numOfElems,
pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, numOfElems, 1);
......@@ -2540,11 +2541,19 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
if (pHisto->numOfElems <= 0) {
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("merge histo, total:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries, pHisto);
} else {
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("input histogram, elem:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
pInput->pHisto);
SHistogramInfo* pRes = tHistogramMerge(pHisto, pInput->pHisto, MAX_HISTOGRAM_BIN);
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("merge histo, total:%"PRId64", entry:%d, %p", pHisto->numOfElems, pHisto->numOfEntries,
pHisto);
tHistogramDestroy(&pRes);
}
}
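The histogram handled above lives in one flat allocation: an SHistogramInfo header immediately followed by its SHistBin array, with pHisto->elems pointing back into the same buffer. That is why apercentileTransferInfo re-anchors elems after every memcpy. A simplified sketch of the pattern, using hypothetical Histo/Bin types rather than the real structures:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { double val; int64_t num; } Bin;
typedef struct { int32_t numOfEntries; Bin *bins; } Histo;

static Histo *histoAlloc(int32_t maxBins) {
  Histo *h = calloc(1, sizeof(Histo) + sizeof(Bin) * maxBins);
  if (h != NULL) h->bins = (Bin *)((char *)h + sizeof(Histo));  /* interior pointer */
  return h;
}

int main(void) {
  Histo *src = histoAlloc(4);
  Histo *dst = histoAlloc(4);
  if (src == NULL || dst == NULL) return 1;

  src->numOfEntries = 1;
  src->bins[0].val = 3.5;

  memcpy(dst, src, sizeof(Histo) + sizeof(Bin) * 4);
  /* After the copy, dst->bins still points into src's buffer; re-anchor it. */
  dst->bins = (Bin *)((char *)dst + sizeof(Histo));
  assert(dst->bins[0].val == 3.5);

  free(src);
  free(dst);
  return 0;
}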
......@@ -2560,14 +2569,20 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
SAPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
int32_t start = pInput->startRowIndex;
qDebug("total %d rows will merge, %p", pInput->numOfRows, pInfo->pHisto);
int32_t start = pInput->startRowIndex;
for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i);
char* data = colDataGetData(pCol, i);
SAPercentileInfo* pInputInfo = (SAPercentileInfo*)varDataVal(data);
apercentileTransferInfo(pInputInfo, pInfo);
}
if (pInfo->algo != APERCT_ALGO_TDIGEST) {
qDebug("after merge, total:%d, numOfEntry:%d, %p", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, 1, 1);
return TSDB_CODE_SUCCESS;
}
......@@ -2585,6 +2600,8 @@ int32_t apercentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
}
} else {
if (pInfo->pHisto->numOfElems > 0) {
qDebug("get the final res:%d, elements:%"PRId64", entry:%d", pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries);
double ratio[] = {pInfo->percent};
double* res = tHistogramUniform(pInfo->pHisto, ratio, 1);
pInfo->result = *res;
......@@ -2638,6 +2655,9 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SAPercentileInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
ASSERT(pDBuf->algo == pSBuf->algo);
qDebug("start to combine apercentile, %p", pDBuf->pHisto);
apercentileTransferInfo(pSBuf, pDBuf);
pDResInfo->numOfRes = TMAX(pDResInfo->numOfRes, pSResInfo->numOfRes);
return TSDB_CODE_SUCCESS;
......
......@@ -369,6 +369,8 @@ static void destroyPhysiNode(SPhysiNode* pNode) {
nodesDestroyList(pNode->pChildren);
nodesDestroyNode(pNode->pConditions);
nodesDestroyNode((SNode*)pNode->pOutputDataBlockDesc);
nodesDestroyNode(pNode->pLimit);
nodesDestroyNode(pNode->pSlimit);
}
static void destroyWinodwPhysiNode(SWinodwPhysiNode* pNode) {
......@@ -389,6 +391,16 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode*
static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); }
static void destroyTableCfg(STableCfg* pCfg) {
taosArrayDestroy(pCfg->pFuncs);
taosMemoryFree(pCfg->pComment);
taosMemoryFree(pCfg->pSchemas);
taosMemoryFree(pCfg->pTags);
taosMemoryFree(pCfg);
}
static void destroySmaIndex(void* pIndex) { taosMemoryFree(((STableIndexInfo*)pIndex)->expr); }
void nodesDestroyNode(SNode* pNode) {
if (NULL == pNode) {
return;
......@@ -426,6 +438,7 @@ void nodesDestroyNode(SNode* pNode) {
SRealTableNode* pReal = (SRealTableNode*)pNode;
taosMemoryFreeClear(pReal->pMeta);
taosMemoryFreeClear(pReal->pVgroupList);
taosArrayDestroyEx(pReal->pSmaIndexes, destroySmaIndex);
break;
}
case QUERY_NODE_TEMP_TABLE:
......@@ -446,9 +459,12 @@ void nodesDestroyNode(SNode* pNode) {
break;
case QUERY_NODE_LIMIT: // no pointer field
break;
case QUERY_NODE_STATE_WINDOW:
nodesDestroyNode(((SStateWindowNode*)pNode)->pExpr);
case QUERY_NODE_STATE_WINDOW: {
SStateWindowNode* pState = (SStateWindowNode*)pNode;
nodesDestroyNode(pState->pCol);
nodesDestroyNode(pState->pExpr);
break;
}
case QUERY_NODE_SESSION_WINDOW: {
SSessionWindowNode* pSession = (SSessionWindowNode*)pNode;
nodesDestroyNode((SNode*)pSession->pCol);
......@@ -495,8 +511,10 @@ void nodesDestroyNode(SNode* pNode) {
}
case QUERY_NODE_TABLE_OPTIONS: {
STableOptions* pOptions = (STableOptions*)pNode;
nodesDestroyList(pOptions->pSma);
nodesDestroyList(pOptions->pMaxDelay);
nodesDestroyList(pOptions->pWatermark);
nodesDestroyList(pOptions->pRollupFuncs);
nodesDestroyList(pOptions->pSma);
break;
}
case QUERY_NODE_INDEX_OPTIONS: {
......@@ -505,17 +523,22 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pOptions->pInterval);
nodesDestroyNode(pOptions->pOffset);
nodesDestroyNode(pOptions->pSliding);
nodesDestroyNode(pOptions->pStreamOptions);
break;
}
case QUERY_NODE_EXPLAIN_OPTIONS: // no pointer field
break;
case QUERY_NODE_STREAM_OPTIONS:
nodesDestroyNode(((SStreamOptions*)pNode)->pWatermark);
case QUERY_NODE_STREAM_OPTIONS: {
SStreamOptions* pOptions = (SStreamOptions*)pNode;
nodesDestroyNode(pOptions->pDelay);
nodesDestroyNode(pOptions->pWatermark);
break;
}
case QUERY_NODE_LEFT_VALUE: // no pointer field
break;
case QUERY_NODE_SET_OPERATOR: {
SSetOperator* pStmt = (SSetOperator*)pNode;
nodesDestroyList(pStmt->pProjectionList);
nodesDestroyNode(pStmt->pLeft);
nodesDestroyNode(pStmt->pRight);
nodesDestroyList(pStmt->pOrderByList);
......@@ -577,7 +600,8 @@ void nodesDestroyNode(SNode* pNode) {
break;
case QUERY_NODE_DROP_SUPER_TABLE_STMT: // no pointer field
break;
case QUERY_NODE_ALTER_TABLE_STMT: {
case QUERY_NODE_ALTER_TABLE_STMT:
case QUERY_NODE_ALTER_SUPER_TABLE_STMT: {
SAlterTableStmt* pStmt = (SAlterTableStmt*)pNode;
nodesDestroyNode((SNode*)pStmt->pOptions);
nodesDestroyNode((SNode*)pStmt->pVal);
......@@ -681,14 +705,15 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pTbName);
break;
}
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT: // no pointer field
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
nodesDestroyNode(((SShowDnodeVariablesStmt*)pNode)->pDnodeId);
break;
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
taosMemoryFreeClear(((SShowCreateDatabaseStmt*)pNode)->pCfg);
break;
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
taosMemoryFreeClear(((SShowCreateTableStmt*)pNode)->pCfg);
destroyTableCfg((STableCfg*)(((SShowCreateTableStmt*)pNode)->pCfg));
break;
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT: // no pointer field
case QUERY_NODE_KILL_CONNECTION_STMT: // no pointer field
......@@ -713,7 +738,6 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_QUERY: {
SQuery* pQuery = (SQuery*)pNode;
nodesDestroyNode(pQuery->pRoot);
nodesDestroyNode(pQuery->pPrepareRoot);
taosMemoryFreeClear(pQuery->pResSchema);
if (NULL != pQuery->pCmdMsg) {
taosMemoryFreeClear(pQuery->pCmdMsg->pMsg);
......@@ -722,6 +746,7 @@ void nodesDestroyNode(SNode* pNode) {
taosArrayDestroy(pQuery->pDbList);
taosArrayDestroy(pQuery->pTableList);
taosArrayDestroy(pQuery->pPlaceholderValues);
nodesDestroyNode(pQuery->pPrepareRoot);
break;
}
case QUERY_NODE_LOGIC_PLAN_SCAN: {
......@@ -733,7 +758,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyList(pLogicNode->pDynamicScanFuncs);
nodesDestroyNode(pLogicNode->pTagCond);
nodesDestroyNode(pLogicNode->pTagIndexCond);
taosArrayDestroy(pLogicNode->pSmaIndexes);
taosArrayDestroyEx(pLogicNode->pSmaIndexes, destroySmaIndex);
nodesDestroyList(pLogicNode->pGroupTags);
break;
}
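The switch from taosArrayDestroy to taosArrayDestroyEx above matters because each STableIndexInfo element owns a heap-allocated expr that a plain destroy would leak. A simplified sketch of the destroy-with-callback pattern, with hypothetical helpers rather than the real SArray API:

#include <stdlib.h>
#include <string.h>

typedef struct { char *expr; } IndexInfo;

static void destroyIndexInfo(void *p) { free(((IndexInfo *)p)->expr); }

static void arrayDestroyEx(IndexInfo *arr, size_t n, void (*freeFp)(void *)) {
  for (size_t i = 0; i < n; ++i) freeFp(&arr[i]);  /* release per-element resources */
  free(arr);                                       /* then the array itself */
}

int main(void) {
  IndexInfo *arr = calloc(2, sizeof(IndexInfo));
  if (arr == NULL) return 1;
  arr[0].expr = malloc(16);
  arr[1].expr = malloc(16);
  if (arr[0].expr == NULL || arr[1].expr == NULL) return 1;
  strcpy(arr[0].expr, "sum(c1)");
  strcpy(arr[1].expr, "avg(c2)");
  arrayDestroyEx(arr, 2, destroyIndexInfo);
  return 0;
}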
......@@ -762,6 +787,9 @@ void nodesDestroyNode(SNode* pNode) {
destroyLogicNode((SLogicNode*)pLogicNode);
destroyVgDataBlockArray(pLogicNode->pDataBlocks);
// pVgDataBlocks is weak reference
nodesDestroyNode(pLogicNode->pAffectedRows);
taosMemoryFreeClear(pLogicNode->pVgroupList);
nodesDestroyList(pLogicNode->pInsertCols);
break;
}
case QUERY_NODE_LOGIC_PLAN_EXCHANGE:
......@@ -780,6 +808,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyList(pLogicNode->pFuncs);
nodesDestroyNode(pLogicNode->pTspk);
nodesDestroyNode(pLogicNode->pTsEnd);
nodesDestroyNode(pLogicNode->pStateExpr);
break;
}
case QUERY_NODE_LOGIC_PLAN_FILL: {
......@@ -829,9 +858,14 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
destroyScanPhysiNode((SScanPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN: {
SLastRowScanPhysiNode* pPhyNode = (SLastRowScanPhysiNode*)pNode;
destroyScanPhysiNode((SScanPhysiNode*)pNode);
nodesDestroyList(pPhyNode->pGroupTags);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
......
......@@ -462,7 +462,7 @@ explain_options(A) ::= explain_options(B) VERBOSE NK_BOOL(C).
explain_options(A) ::= explain_options(B) RATIO NK_FLOAT(C). { A = setExplainRatio(pCxt, B, &C); }
/************************************************ compact *************************************************************/
cmd ::= COMPACT VNODES IN NK_LP integer_list(A) NK_RP. { pCxt->pRootNode = createCompactStmt(pCxt, A); }
cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP. { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
/************************************************ create/drop function ************************************************/
cmd ::= CREATE agg_func_opt(A) FUNCTION not_exists_opt(F) function_name(B)
......
......@@ -387,6 +387,19 @@ SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType typ
return (SNode*)cond;
}
static uint8_t getMinusDataType(uint8_t orgType) {
switch (orgType) {
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_UBIGINT:
return TSDB_DATA_TYPE_BIGINT;
default:
break;
}
return orgType;
}
SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) {
CHECK_PARSER_STATUS(pCxt);
if (OP_TYPE_MINUS == type && QUERY_NODE_VALUE == nodeType(pLeft)) {
......@@ -402,7 +415,7 @@ SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pL
}
taosMemoryFree(pVal->literal);
pVal->literal = pNewLiteral;
pVal->node.resType.type = TSDB_DATA_TYPE_BIGINT;
pVal->node.resType.type = getMinusDataType(pVal->node.resType.type);
return pLeft;
}
SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR);
......
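getMinusDataType() above widens the unsigned integer types to BIGINT before the unary minus is applied, while every other type keeps its original type. A standalone sketch, in plain C rather than parser code, of the wrap-around this avoids:

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  uint32_t u = 10;                    /* e.g. a parsed UINT literal */
  uint32_t wrapped = (uint32_t)-u;    /* negating in the unsigned type wraps */
  int64_t  widened = -(int64_t)u;     /* widening to signed 64-bit first */
  printf("%" PRIu32 " vs %" PRId64 "\n", wrapped, widened);  /* 4294967286 vs -10 */
  return 0;
}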
......@@ -1257,6 +1257,7 @@ static int32_t rewriteFuncToValue(STranslateContext* pCxt, char* pLiteral, SNode
}
}
if (DEAL_RES_ERROR != translateValue(pCxt, pVal)) {
nodesDestroyNode(*pNode);
*pNode = (SNode*)pVal;
} else {
nodesDestroyNode((SNode*)pVal);
......@@ -4009,30 +4010,7 @@ static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) {
return NULL;
}
static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
"Set tag value only available for child table");
}
if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
}
STableMeta* pTableMeta = NULL;
int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
static int32_t checkAlterSuperTableImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta) {
SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON &&
(pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG ||
......@@ -4057,6 +4035,33 @@ static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pS
return TSDB_CODE_SUCCESS;
}
static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE,
"Set tag value only available for child table");
}
if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
if (pStmt->dataType.type == TSDB_DATA_TYPE_JSON && pStmt->alterType == TSDB_ALTER_TABLE_ADD_COLUMN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON);
}
STableMeta* pTableMeta = NULL;
int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = checkAlterSuperTableImpl(pCxt, pStmt, pTableMeta);
}
taosMemoryFree(pTableMeta);
return code;
}
static int32_t translateAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) {
SMAlterStbReq alterReq = {0};
int32_t code = checkAlterSuperTable(pCxt, pStmt);
......@@ -6438,6 +6443,7 @@ static int32_t toMsgType(ENodeType type) {
static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
if (NULL != pCxt->pDbs) {
taosArrayDestroy(pQuery->pDbList);
pQuery->pDbList = taosArrayInit(taosHashGetSize(pCxt->pDbs), TSDB_DB_FNAME_LEN);
if (NULL == pQuery->pDbList) {
return TSDB_CODE_OUT_OF_MEMORY;
......@@ -6450,6 +6456,7 @@ static int32_t setRefreshMate(STranslateContext* pCxt, SQuery* pQuery) {
}
if (NULL != pCxt->pTables) {
taosArrayDestroy(pQuery->pTableList);
pQuery->pTableList = taosArrayInit(taosHashGetSize(pCxt->pTables), sizeof(SName));
if (NULL == pQuery->pTableList) {
return TSDB_CODE_OUT_OF_MEMORY;
......@@ -6521,6 +6528,7 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
pQuery->stableQuery = pCxt->stableQuery;
if (pQuery->haveResultSet) {
taosMemoryFreeClear(pQuery->pResSchema);
if (TSDB_CODE_SUCCESS != extractResultSchema(pQuery->pRoot, &pQuery->numOfResCols, &pQuery->pResSchema)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
......
......@@ -865,12 +865,15 @@ STableCfg* tableCfgDup(STableCfg* pCfg) {
STableCfg* pNew = taosMemoryMalloc(sizeof(*pNew));
memcpy(pNew, pCfg, sizeof(*pNew));
if (pNew->pComment) {
if (NULL != pNew->pComment) {
pNew->pComment = strdup(pNew->pComment);
}
if (pNew->pFuncs) {
if (NULL != pNew->pFuncs) {
pNew->pFuncs = taosArrayDup(pNew->pFuncs);
}
if (NULL != pNew->pTags) {
pNew->pTags = strdup(pNew->pTags);
}
int32_t schemaSize = (pCfg->numOfColumns + pCfg->numOfTags) * sizeof(SSchema);
......
......@@ -4117,7 +4117,8 @@ static YYACTIONTYPE yy_reduce(
yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 254: /* cmd ::= COMPACT VNODES IN NK_LP integer_list NK_RP */
{ pCxt->pRootNode = createCompactStmt(pCxt, yymsp[-1].minor.yy356); }
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
yy_destructor(yypParser,273,&yymsp[-1].minor);
break;
case 255: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy151, yymsp[-8].minor.yy151, &yymsp[-5].minor.yy361, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy600, yymsp[0].minor.yy734); }
......
......@@ -93,6 +93,17 @@ class MockCatalogServiceImpl {
MockCatalogServiceImpl() : id_(1) {}
~MockCatalogServiceImpl() {
for (auto& cfg : dbCfg_) {
taosArrayDestroy(cfg.second.pRetensions);
}
for (auto& indexes : index_) {
for (auto& index : indexes.second) {
taosMemoryFree(index.expr);
}
}
}
int32_t catalogGetHandle() const { return 0; }
int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
......@@ -676,6 +687,7 @@ void MockCatalogService::destoryCatalogReq(SCatalogReq* pReq) {
taosArrayDestroy(pReq->pIndex);
taosArrayDestroy(pReq->pUser);
taosArrayDestroy(pReq->pTableIndex);
taosArrayDestroy(pReq->pTableCfg);
delete pReq;
}
......@@ -684,6 +696,11 @@ void MockCatalogService::destoryMetaRes(void* p) {
taosMemoryFree(pRes->pRes);
}
void MockCatalogService::destoryMetaArrayRes(void* p) {
SMetaRes* pRes = (SMetaRes*)p;
taosArrayDestroy((SArray*)pRes->pRes);
}
void MockCatalogService::destoryMetaData(SMetaData* pData) {
taosArrayDestroyEx(pData->pDbVgroup, destoryMetaRes);
taosArrayDestroyEx(pData->pDbCfg, destoryMetaRes);
......@@ -695,5 +712,8 @@ void MockCatalogService::destoryMetaData(SMetaData* pData) {
taosArrayDestroyEx(pData->pIndex, destoryMetaRes);
taosArrayDestroyEx(pData->pUser, destoryMetaRes);
taosArrayDestroyEx(pData->pQnodeList, destoryMetaRes);
taosArrayDestroyEx(pData->pTableCfg, destoryMetaRes);
taosArrayDestroyEx(pData->pDnodeList, destoryMetaArrayRes);
taosMemoryFree(pData->pSvrVer);
delete pData;
}
......@@ -52,6 +52,7 @@ class MockCatalogService {
public:
static void destoryCatalogReq(SCatalogReq* pReq);
static void destoryMetaRes(void* p);
static void destoryMetaArrayRes(void* p);
static void destoryMetaData(SMetaData* pData);
MockCatalogService();
......
......@@ -21,7 +21,11 @@ namespace ParserTest {
class ParserInitialCTest : public ParserDdlTest {};
// todo compact
TEST_F(ParserInitialCTest, compact) {
useDb("root", "test");
run("COMPACT VNODES IN (1, 2)", TSDB_CODE_PAR_EXPRIE_STATEMENT, PARSER_STAGE_PARSE);
}
TEST_F(ParserInitialCTest, createAccount) {
useDb("root", "test");
......@@ -32,6 +36,19 @@ TEST_F(ParserInitialCTest, createAccount) {
TEST_F(ParserInitialCTest, createBnode) {
useDb("root", "test");
SMCreateQnodeReq expect = {0};
auto setCreateQnodeReq = [&](int32_t dnodeId) { expect.dnodeId = dnodeId; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_BNODE_STMT);
SMCreateQnodeReq req = {0};
ASSERT_TRUE(TSDB_CODE_SUCCESS ==
tDeserializeSCreateDropMQSBNodeReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
ASSERT_EQ(req.dnodeId, expect.dnodeId);
});
setCreateQnodeReq(1);
run("CREATE BNODE ON DNODE 1");
}
......
......@@ -123,6 +123,14 @@ class ParserTestBaseImpl {
delete pMetaCache;
}
static void _destroyQuery(SQuery** pQuery) {
if (nullptr == pQuery) {
return;
}
qDestroyQuery(*pQuery);
taosMemoryFree(pQuery);
}
bool checkResultCode(const string& pFunc, int32_t resultCode) {
return !(stmtEnv_.checkFunc_.empty())
? ((stmtEnv_.checkFunc_ == pFunc) ? stmtEnv_.expect_ == resultCode : TSDB_CODE_SUCCESS == resultCode)
......@@ -278,9 +286,9 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
doParse(&cxt, query.get());
SQuery* pQuery = *(query.get());
doAuthenticate(&cxt, pQuery, nullptr);
......@@ -306,9 +314,9 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
SQuery* pQuery = nullptr;
doParseSql(&cxt, &pQuery);
unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
doParseSql(&cxt, query.get());
SQuery* pQuery = *(query.get());
if (g_dump) {
dump();
......@@ -328,9 +336,9 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt, true);
SQuery* pQuery = nullptr;
doParse(&cxt, &pQuery);
unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
doParse(&cxt, query.get());
SQuery* pQuery = *(query.get());
unique_ptr<SParseMetaCache, void (*)(SParseMetaCache*)> metaCache(new SParseMetaCache(), _destoryParseMetaCache);
doCollectMetaKey(&cxt, pQuery, metaCache.get());
......@@ -386,9 +394,9 @@ class ParserTestBaseImpl {
unique_ptr<SCatalogReq, void (*)(SCatalogReq*)> catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
SQuery* pQuery = nullptr;
doParseSqlSyntax(&cxt, &pQuery, catalogReq.get());
unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
doParseSqlSyntax(&cxt, query.get(), catalogReq.get());
SQuery* pQuery = *(query.get());
string err;
thread t1([&]() {
......
......@@ -1068,7 +1068,11 @@ static int32_t createExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogicNo
}
static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWinodwPhysiNode* pWindow,
SWindowLogicNode* pWindowLogicNode, SPhysiNode** pPhyNode) {
SWindowLogicNode* pWindowLogicNode) {
pWindow->triggerType = pWindowLogicNode->triggerType;
pWindow->watermark = pWindowLogicNode->watermark;
pWindow->igExpired = pWindowLogicNode->igExpired;
SNodeList* pPrecalcExprs = NULL;
SNodeList* pFuncs = NULL;
int32_t code = rewritePrecalcExprs(pCxt, pWindowLogicNode->pFuncs, &pPrecalcExprs, &pFuncs);
......@@ -1100,16 +1104,6 @@ static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList*
code = setConditionsSlotId(pCxt, (const SLogicNode*)pWindowLogicNode, (SPhysiNode*)pWindow);
}
pWindow->triggerType = pWindowLogicNode->triggerType;
pWindow->watermark = pWindowLogicNode->watermark;
pWindow->igExpired = pWindowLogicNode->igExpired;
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pWindow;
} else {
nodesDestroyNode((SNode*)pWindow);
}
nodesDestroyList(pPrecalcExprs);
nodesDestroyList(pFuncs);
......@@ -1156,7 +1150,14 @@ static int32_t createIntervalPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChil
pInterval->intervalUnit = pWindowLogicNode->intervalUnit;
pInterval->slidingUnit = pWindowLogicNode->slidingUnit;
return createWindowPhysiNodeFinalize(pCxt, pChildren, &pInterval->window, pWindowLogicNode, pPhyNode);
int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pInterval->window, pWindowLogicNode);
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pInterval;
} else {
nodesDestroyNode((SNode*)pInterval);
}
return code;
}
static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
......@@ -1169,7 +1170,14 @@ static int32_t createSessionWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList*
pSession->gap = pWindowLogicNode->sessionGap;
return createWindowPhysiNodeFinalize(pCxt, pChildren, &pSession->window, pWindowLogicNode, pPhyNode);
int32_t code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pSession->window, pWindowLogicNode);
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pSession;
} else {
nodesDestroyNode((SNode*)pSession);
}
return code;
}
static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
......@@ -1201,12 +1209,20 @@ static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pC
}
}
if (TSDB_CODE_SUCCESS != code) {
if (TSDB_CODE_SUCCESS == code) {
code = createWindowPhysiNodeFinalize(pCxt, pChildren, &pState->window, pWindowLogicNode);
}
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pState;
} else {
nodesDestroyNode((SNode*)pState);
return code;
}
return createWindowPhysiNodeFinalize(pCxt, pChildren, &pState->window, pWindowLogicNode, pPhyNode);
nodesDestroyList(pPrecalcExprs);
nodesDestroyNode(pStateKey);
return code;
}
static int32_t createWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowLogicNode* pWindowLogicNode,
......
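The finalize refactor above changes who publishes the physical node: createWindowPhysiNodeFinalize() now only wires up the window fields, and each of the interval/session/state creators assigns *pPhyNode on success or destroys its own node on failure. A minimal sketch of that out-parameter ownership convention, with hypothetical names and a stubbed failure path:

#include <stdlib.h>

typedef struct { int interval; } WinNode;  /* stand-in, not the real physical node */

static int finalizeWindowNode(WinNode *pWindow) {
  /* ... precalc-expression rewriting and slot binding that may fail ... */
  (void)pWindow;
  return 0;  /* 0 == success */
}

static int createIntervalNode(WinNode **ppPhyNode) {
  WinNode *pInterval = calloc(1, sizeof(WinNode));
  if (pInterval == NULL) return -1;
  pInterval->interval = 10;

  int code = finalizeWindowNode(pInterval);
  if (code == 0) {
    *ppPhyNode = pInterval;  /* hand ownership to the caller on success */
  } else {
    free(pInterval);         /* never leak a half-built node on failure */
  }
  return code;
}

int main(void) {
  WinNode *pNode = NULL;
  int code = createIntervalNode(&pNode);
  if (code == 0) free(pNode);
  return code;
}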
......@@ -867,10 +867,11 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, pMergeKeys, pPartSort, groupSort);
}
if (TSDB_CODE_SUCCESS == code && groupSort) {
stbSplSetScanPartSort(pPartSort);
}
if (TSDB_CODE_SUCCESS == code) {
nodesDestroyNode((SNode*)pInfo->pSplitNode);
if (groupSort) {
stbSplSetScanPartSort(pPartSort);
}
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pPartSort, SPLIT_FLAG_STABLE_SPLIT));
}
......
......@@ -24,6 +24,16 @@ class PlanStmtTest : public PlannerTestBase {
return (TAOS_MULTI_BIND*)taosMemoryCalloc(nParams, sizeof(TAOS_MULTI_BIND));
}
void destoryBindParams(TAOS_MULTI_BIND* pParams, int32_t nParams) {
for (int32_t i = 0; i < nParams; ++i) {
TAOS_MULTI_BIND* pParam = pParams + i;
taosMemoryFree(pParam->buffer);
taosMemoryFree(pParam->length);
taosMemoryFree(pParam->is_null);
}
taosMemoryFree(pParams);
}
TAOS_MULTI_BIND* buildIntegerParam(TAOS_MULTI_BIND* pBindParams, int32_t index, int64_t val, int32_t type) {
TAOS_MULTI_BIND* pBindParam = initParam(pBindParams, index, type, 0);
......@@ -127,8 +137,10 @@ TEST_F(PlanStmtTest, basic) {
useDb("root", "test");
prepare("SELECT * FROM t1 WHERE c1 = ?");
bindParams(buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT), 0);
TAOS_MULTI_BIND* pBindParams = buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT);
bindParams(pBindParams, 0);
exec();
destoryBindParams(pBindParams, 1);
{
prepare("SELECT * FROM t1 WHERE c1 = ? AND c2 = ?");
......@@ -137,7 +149,7 @@ TEST_F(PlanStmtTest, basic) {
buildStringParam(pBindParams, 1, "abc", TSDB_DATA_TYPE_VARCHAR, strlen("abc"));
bindParams(pBindParams, -1);
exec();
taosMemoryFreeClear(pBindParams);
destoryBindParams(pBindParams, 2);
}
{
......@@ -147,7 +159,7 @@ TEST_F(PlanStmtTest, basic) {
buildIntegerParam(pBindParams, 1, 20, TSDB_DATA_TYPE_INT);
bindParams(pBindParams, -1);
exec();
taosMemoryFreeClear(pBindParams);
destoryBindParams(pBindParams, 2);
}
}
......@@ -155,12 +167,16 @@ TEST_F(PlanStmtTest, multiExec) {
useDb("root", "test");
prepare("SELECT * FROM t1 WHERE c1 = ?");
bindParams(buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT), 0);
TAOS_MULTI_BIND* pBindParams = buildIntegerParam(createBindParams(1), 0, 10, TSDB_DATA_TYPE_INT);
bindParams(pBindParams, 0);
exec();
bindParams(buildIntegerParam(createBindParams(1), 0, 20, TSDB_DATA_TYPE_INT), 0);
destoryBindParams(pBindParams, 1);
pBindParams = buildIntegerParam(createBindParams(1), 0, 20, TSDB_DATA_TYPE_INT);
bindParams(pBindParams, 0);
exec();
bindParams(buildIntegerParam(createBindParams(1), 0, 30, TSDB_DATA_TYPE_INT), 0);
destoryBindParams(pBindParams, 1);
pBindParams = buildIntegerParam(createBindParams(1), 0, 30, TSDB_DATA_TYPE_INT);
bindParams(pBindParams, 0);
exec();
destoryBindParams(pBindParams, 1);
}
TEST_F(PlanStmtTest, allDataType) { useDb("root", "test"); }
......@@ -126,9 +126,9 @@ class PlannerTestBaseImpl {
reset();
tsQueryPolicy = queryPolicy;
try {
SQuery* pQuery = nullptr;
doParseSql(sql, &pQuery);
unique_ptr<SQuery, void (*)(SQuery*)> query(pQuery, qDestroyQuery);
unique_ptr<SQuery*, void (*)(SQuery**)> query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
doParseSql(sql, query.get());
SQuery* pQuery = *(query.get());
SPlanContext cxt = {0};
setPlanContext(pQuery, &cxt);
......@@ -199,6 +199,8 @@ class PlannerTestBaseImpl {
SLogicSubplan* pLogicSubplan = nullptr;
doCreateLogicPlan(&cxt, &pLogicSubplan);
unique_ptr<SLogicSubplan, void (*)(SLogicSubplan*)> logicSubplan(pLogicSubplan,
(void (*)(SLogicSubplan*))nodesDestroyNode);
doOptimizeLogicPlan(&cxt, pLogicSubplan);
......@@ -206,9 +208,12 @@ class PlannerTestBaseImpl {
SQueryLogicPlan* pLogicPlan = nullptr;
doScaleOutLogicPlan(&cxt, pLogicSubplan, &pLogicPlan);
unique_ptr<SQueryLogicPlan, void (*)(SQueryLogicPlan*)> logicPlan(pLogicPlan,
(void (*)(SQueryLogicPlan*))nodesDestroyNode);
SQueryPlan* pPlan = nullptr;
doCreatePhysiPlan(&cxt, pLogicPlan, &pPlan);
unique_ptr<SQueryPlan, void (*)(SQueryPlan*)> plan(pPlan, (void (*)(SQueryPlan*))nodesDestroyNode);
dump(g_dumpModule);
} catch (...) {
......@@ -249,6 +254,14 @@ class PlannerTestBaseImpl {
vector<string> physiSubplans_;
};
static void _destroyQuery(SQuery** pQuery) {
if (nullptr == pQuery) {
return;
}
qDestroyQuery(*pQuery);
taosMemoryFree(pQuery);
}
void reset() {
stmtEnv_.sql_.clear();
stmtEnv_.msgBuf_.fill(0);
......@@ -400,20 +413,30 @@ class PlannerTestBaseImpl {
pCxt->queryId = 1;
pCxt->pUser = caseEnv_.user_.c_str();
if (QUERY_NODE_CREATE_TOPIC_STMT == nodeType(pQuery->pRoot)) {
pCxt->pAstRoot = ((SCreateTopicStmt*)pQuery->pRoot)->pQuery;
SCreateTopicStmt* pStmt = (SCreateTopicStmt*)pQuery->pRoot;
pCxt->pAstRoot = pStmt->pQuery;
pStmt->pQuery = nullptr;
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = pCxt->pAstRoot;
pCxt->topicQuery = true;
} else if (QUERY_NODE_CREATE_INDEX_STMT == nodeType(pQuery->pRoot)) {
SMCreateSmaReq req = {0};
tDeserializeSMCreateSmaReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req);
g_mockCatalogService->createSmaIndex(&req);
nodesStringToNode(req.ast, &pCxt->pAstRoot);
tFreeSMCreateSmaReq(&req);
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = pCxt->pAstRoot;
pCxt->streamQuery = true;
} else if (QUERY_NODE_CREATE_STREAM_STMT == nodeType(pQuery->pRoot)) {
SCreateStreamStmt* pStmt = (SCreateStreamStmt*)pQuery->pRoot;
pCxt->pAstRoot = pStmt->pQuery;
pStmt->pQuery = nullptr;
pCxt->streamQuery = true;
pCxt->triggerType = pStmt->pOptions->triggerType;
pCxt->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0);
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = pCxt->pAstRoot;
} else {
pCxt->pAstRoot = pQuery->pRoot;
}
......
......@@ -559,10 +559,11 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) {
snprintf(pEpSet->eps[i].fqdn, sizeof(pEpSet->eps[i].fqdn), "%s", (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodeFqdn);
pEpSet->eps[i].port = (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodePort;
(pEpSet->numOfEps)++;
sInfo("vgId:%d sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
sInfo("vgId:%d, sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn,
pEpSet->eps[i].port);
}
pEpSet->inUse = (pSyncNode->pRaftCfg->cfg.myIndex + 1) % pEpSet->numOfEps;
sInfo("vgId:%d sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
sInfo("vgId:%d, sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
......@@ -2996,7 +2997,7 @@ void syncLogRecvAppendEntries(SSyncNode* pSyncNode, const SyncAppendEntries* pMs
"datalen:%d}, %s",
host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, pMsg->privateTerm,
pMsg->dataLen, s);
syncNodeErrorLog(pSyncNode, logBuf);
syncNodeEventLog(pSyncNode, logBuf);
}
void syncLogSendAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntriesBatch* pMsg, const char* s) {
......@@ -3022,7 +3023,7 @@ void syncLogRecvAppendEntriesBatch(SSyncNode* pSyncNode, const SyncAppendEntries
", pterm:%" PRIu64 ", commit:%" PRId64 ", datalen:%d, count:%d}, %s",
host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm, pMsg->commitIndex,
pMsg->dataLen, pMsg->dataCount, s);
syncNodeErrorLog(pSyncNode, logBuf);
syncNodeEventLog(pSyncNode, logBuf);
}
void syncLogSendAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntriesReply* pMsg, const char* s) {
......@@ -3046,5 +3047,5 @@ void syncLogRecvAppendEntriesReply(SSyncNode* pSyncNode, const SyncAppendEntries
"recv sync-append-entries-reply from %s:%d {term:%" PRIu64 ", pterm:%" PRIu64 ", success:%d, match:%" PRId64
"}, %s",
host, port, pMsg->term, pMsg->privateTerm, pMsg->success, pMsg->matchIndex, s);
syncNodeErrorLog(pSyncNode, logBuf);
syncNodeEventLog(pSyncNode, logBuf);
}
......@@ -741,7 +741,10 @@ class AnyState:
sCnt += 1
if (sCnt >= 2):
raise CrashGenError(
"Unexpected more than 1 success with task: {}".format(cls))
"Unexpected more than 1 success with task: {}, in task set: {}".format(
cls.__name__, # verified just now that isinstance(task, cls)
[c.__class__.__name__ for c in tasks]
))
def assertIfExistThenSuccess(self, tasks, cls):
sCnt = 0
......
......@@ -11,13 +11,13 @@
# -*- coding: utf-8 -*-
from collections import defaultdict
import random
import string
import requests
import time
import socket
import json
import toml
from .boundary import DataBoundary
import taos
from util.log import *
......@@ -25,6 +25,79 @@ from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
from util.constant import *
from dataclasses import dataclass,field
from typing import List
@dataclass
class DataSet:
ts_data : List[int] = field(default_factory=list)
int_data : List[int] = field(default_factory=list)
bint_data : List[int] = field(default_factory=list)
sint_data : List[int] = field(default_factory=list)
tint_data : List[int] = field(default_factory=list)
uint_data : List[int] = field(default_factory=list)
ubint_data : List[int] = field(default_factory=list)
usint_data : List[int] = field(default_factory=list)
utint_data : List[int] = field(default_factory=list)
float_data : List[float] = field(default_factory=list)
double_data : List[float] = field(default_factory=list)
bool_data : List[int] = field(default_factory=list)
vchar_data : List[str] = field(default_factory=list)
nchar_data : List[str] = field(default_factory=list)
def get_order_set(self,
rows,
int_step :int = 1,
bint_step :int = 1,
sint_step :int = 1,
tint_step :int = 1,
uint_step :int = 1,
ubint_step :int = 1,
usint_step :int = 1,
utint_step :int = 1,
float_step :float = 1,
double_step :float = 1,
bool_start :int = 1,
vchar_prefix:str = "vachar_",
vchar_step :int = 1,
nchar_prefix:str = "nchar_测试_",
nchar_step :int = 1,
ts_step :int = 1
):
for i in range(rows):
self.int_data.append( int(i * int_step % INT_MAX ))
self.bint_data.append( int(i * bint_step % BIGINT_MAX ))
self.sint_data.append( int(i * sint_step % SMALLINT_MAX ))
self.tint_data.append( int(i * tint_step % TINYINT_MAX ))
self.uint_data.append( int(i * uint_step % INT_UN_MAX ))
self.ubint_data.append( int(i * ubint_step % BIGINT_UN_MAX ))
self.usint_data.append( int(i * usint_step % SMALLINT_UN_MAX ))
self.utint_data.append( int(i * utint_step % TINYINT_UN_MAX ))
self.float_data.append( float(i * float_step % FLOAT_MAX ))
self.double_data.append( float(i * double_step % DOUBLE_MAX ))
self.bool_data.append( bool((i + bool_start) % 2 ))
self.vchar_data.append( f"{vchar_prefix}_{i * vchar_step}" )
self.nchar_data.append( f"{nchar_prefix}_{i * nchar_step}")
self.ts_data.append( int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000 - i * ts_step))
def get_disorder_set(self,
rows,
int_low :int = INT_MIN,
int_up :int = INT_MAX,
bint_low :int = BIGINT_MIN,
bint_up :int = BIGINT_MAX,
sint_low :int = SMALLINT_MIN,
sint_up :int = SMALLINT_MAX,
tint_low :int = TINYINT_MIN,
tint_up :int = TINYINT_MAX,
ubint_low :int = BIGINT_UN_MIN,
ubint_up :int = BIGINT_UN_MAX,
):
pass
class TDCom:
def __init__(self):
......@@ -372,6 +445,7 @@ class TDCom:
def getClientCfgPath(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
......@@ -650,7 +724,7 @@ class TDCom:
else:
column_value_str += f'{column_value}, '
idx += 1
column_value_str = column_value_str.rstrip()[:-1]
column_value_str = column_value_str.rstrip()[:-1]
insert_sql = f'insert into {dbname}.{tbname} values ({column_value_str});'
tsql.execute(insert_sql)
def getOneRow(self, location, containElm):
......@@ -662,12 +736,12 @@ class TDCom:
return res_list
else:
tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}")
def killProcessor(self, processorName):
def killProcessor(self, processorName):
if (platform.system().lower() == 'windows'):
os.system("TASKKILL /F /IM %s.exe"%processorName)
else:
os.system('pkill %s'%processorName)
os.system('pkill %s'%processorName)
def is_json(msg):
......@@ -680,4 +754,29 @@ def is_json(msg):
else:
return False
def get_path(tool="taosd"):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
paths = []
for root, dirs, files in os.walk(projPath):
if ((tool) in files or ("%s.exe"%tool) in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
paths.append(os.path.join(root, tool))
break
if (len(paths) == 0):
return ""
return paths[0]
def dict2toml(in_dict: dict, file:str):
if not isinstance(in_dict, dict):
return ""
with open(file, 'w') as f:
toml.dump(in_dict, f)
tdCom = TDCom()
......@@ -96,9 +96,9 @@ class TDSimClient:
for key, value in self.cfgDict.items():
self.cfg(key, value)
try:
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
clientCfg = dict (updatecfgDict[0][0].get('clientCfg'))
for key, value in clientCfg.items():
self.cfg(key, value)
......@@ -244,7 +244,6 @@ class TDDnode:
# print(updatecfgDict)
isFirstDir = 1
if bool(updatecfgDict) and updatecfgDict[0] and updatecfgDict[0][0]:
print(updatecfgDict[0][0])
for key, value in updatecfgDict[0][0].items():
if key == "clientCfg" and self.remoteIP == "" and not platform.system().lower() == 'windows':
continue
......@@ -324,7 +323,6 @@ class TDDnode:
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
print("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
if self.valgrind == 0:
time.sleep(0.1)
......@@ -358,7 +356,7 @@ class TDDnode:
# break
# elif bkey2 in line:
# popen.kill()
# break
# break
# if time.time() > timeout:
# print(time.time(),timeout)
# tdLog.exit('wait too long for taosd start')
......@@ -407,7 +405,6 @@ class TDDnode:
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
print("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
if self.valgrind == 0:
time.sleep(0.1)
......@@ -664,7 +661,6 @@ class TDDnodes:
def stoptaosd(self, index):
self.check(index)
self.dnodes[index - 1].stoptaosd()
def start(self, index):
self.check(index)
......
......@@ -235,9 +235,17 @@ class TDSql:
tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
(self.sql, row, col, self.queryResult[row][col], data))
return
elif isinstance(data, float) and abs(self.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(self.sql, row, col, self.queryResult[row][col], data))
elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(self.sql, row, col, self.queryResult[row][col], data))
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
(self.sql, row, col, self.queryResult[row][col], data))
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
return
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
......@@ -323,13 +331,32 @@ class TDSql:
args = (caller.filename, caller.lineno, self.sql, col_name_list, expect_col_name_list)
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
def __check_equal(self, elm, expect_elm):
if not type(elm) in(list, tuple) and elm == expect_elm:
return True
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
if len(elm) != len(expect_elm):
return False
if len(elm) == 0:
return True
for i in range(len(elm)):
flag = self.__check_equal(elm[i], expect_elm[i])
if not flag:
return False
return True
return False
def checkEqual(self, elm, expect_elm):
if elm == expect_elm:
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
return
if self.__check_equal(elm, expect_elm):
tdLog.info("sql:%s, elm:%s == expect_elm:%s" % (self.sql, elm, expect_elm))
return
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, elm, expect_elm)
tdLog.exit("%s(%d) failed: sql:%s, elm:%s != expect_elm:%s" % args)
def checkNotEqual(self, elm, expect_elm):
if elm != expect_elm:
......
import socket
from fabric2 import Connection
from util.log import *
from util.common import *
class TAdapter:
def __init__(self):
self.running = 0
self.deployed = 0
self.remoteIP = ""
self.taosadapter_cfg_dict = {
"debug" : True,
"taosConfigDir" : "",
"port" : 6041,
"logLevel" : "debug",
"cors" : {
"allowAllOrigins" : True,
},
"pool" : {
"maxConnect" : 4000,
"maxIdle" : 4000,
"idleTimeout" : "1h"
},
"ssl" : {
"enable" : False,
"certFile" : "",
"keyFile" : "",
},
"log" : {
"path" : "",
"rotationCount" : 30,
"rotationTime" : "24h",
"rotationSize" : "1GB",
"enableRecordHttpSql" : True,
"sqlRotationCount" : 2,
"sqlRotationTime" : "24h",
"sqlRotationSize" : "1GB",
},
"monitor" : {
"collectDuration" : "3s",
"incgroup" : False,
"pauseQueryMemoryThreshold" : 70,
"pauseAllMemoryThreshold" : 80,
"identity" : "",
"writeToTD" : True,
"user" : "root",
"password" : "taosdata",
"writeInterval" : "30s"
},
"opentsdb" : {
"enable" : False
},
"influxdb" : {
"enable" : False
},
"statsd" : {
"enable" : False
},
"collectd" : {
"enable" : False
},
"opentsdb_telnet" : {
"enable" : False
},
"node_exporter" : {
"enable" : False
},
"prometheus" : {
"enable" : False
},
}
# TODO: add taosadapter env:
# 1. init cfg.toml.dict :OK
# 2. dump dict to toml : OK
# 3. update cfg.toml.dict :OK
# 4. check adapter exists : OK
# 5. deploy adapter cfg : OK
# 6. adapter start : OK
# 7. adapter stop
def init(self, path, remoteIP=""):
self.path = path
self.remoteIP = remoteIP
binPath = get_path() + "/../../../"
binPath = os.path.realpath(binPath)
if path == "":
self.path = os.path.abspath(binPath + "../../")
else:
self.path = os.path.realpath(path)
if self.remoteIP:
try:
self.config = eval(remoteIP)
self.remote_conn = Connection(host=self.config["host"], port=self.config["port"], user=self.config["user"], connect_kwargs={'password':self.config["password"]})
except Exception as e:
tdLog.notice(e)
def update_cfg(self, update_dict :dict):
if not isinstance(update_dict, dict):
return
if "log" in update_dict and "path" in update_dict["log"]:
del update_dict["log"]["path"]
for key, value in update_dict.items():
if key in ["cors", "pool", "ssl", "log", "monitor", "opentsdb", "influxdb", "statsd", "collectd", "opentsdb_telnet", "node_exporter", "prometheus"]:
if isinstance(value, dict):
for k, v in value.items():
self.taosadapter_cfg_dict[key][k] = v
else:
self.taosadapter_cfg_dict[key] = value
def check_adapter(self):
if getPath(tool="taosadapter"):
return False
else:
return True
def remote_exec(self, updateCfgDict, execCmd):
remoteCfgDict = copy.deepcopy(updateCfgDict)
if "log" in remoteCfgDict and "path" in remoteCfgDict["log"]:
del remoteCfgDict["log"]["path"]
remoteCfgDictStr = base64.b64encode(toml.dumps(remoteCfgDict).encode()).decode()
execCmdStr = base64.b64encode(execCmd.encode()).decode()
with self.remote_conn.cd((self.config["path"]+sys.path[0].replace(self.path, '')).replace('\\','/')):
self.remote_conn.run(f"python3 ./test.py -D {remoteCfgDictStr} -e {execCmdStr}" )
def cfg(self, option, value):
cmd = f"echo {option} = {value} >> {self.cfg_path}"
if os.system(cmd) != 0:
tdLog.exit(cmd)
def deploy(self, *update_cfg_dict):
self.log_dir = f"{self.path}/sim/dnode1/log"
self.cfg_dir = f"{self.path}/sim/dnode1/cfg"
self.cfg_path = f"{self.cfg_dir}/taosadapter.toml"
cmd = f"touch {self.cfg_path}"
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.taosadapter_cfg_dict["log"]["path"] = self.log_dir
if bool(update_cfg_dict):
self.update_cfg(update_dict=update_cfg_dict)
if (self.remoteIP == ""):
dict2toml(self.taosadapter_cfg_dict, self.cfg_path)
else:
self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.deploy(update_cfg_dict)")
self.deployed = 1
tdLog.debug(f"taosadapter is deployed and configured by {self.cfg_path}")
def start(self):
bin_path = get_path(tool="taosadapter")
if (bin_path == ""):
tdLog.exit("taosadapter not found!")
else:
tdLog.info(f"taosadapter found: {bin_path}")
if platform.system().lower() == 'windows':
cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
else:
cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null 2>&1 & "
if self.remoteIP:
self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
self.running = 1
else:
os.system(f"rm -rf {self.log_dir}/taosadapter*")
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
tdLog.debug(f"taosadapter is running with {cmd} " )
time.sleep(0.1)
taosadapter_port = self.taosadapter_cfg_dict["port"]
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
try:
res = s.connect_ex((self.remoteIP, taosadapter_port))
s.shutdown(2)
if res == 0:
tdLog.info(f"the taosadapter has been started, using port:{taosadapter_port}")
else:
tdLog.info(f"the taosadapter do not started!!!")
except socket.error as e:
tdLog.notice("socket connect error!")
finally:
if s:
s.close()
# tdLog.debug("the taosadapter has been started.")
time.sleep(1)
def start_taosadapter(self):
"""
use this method, must deploy taosadapter
"""
bin_path = get_path(tool="taosadapter")
if (bin_path == ""):
tdLog.exit("taosadapter not found!")
else:
tdLog.info(f"taosadapter found: {bin_path}")
if self.deployed == 0:
tdLog.exit("taosadapter is not deployed")
if platform.system().lower() == 'windows':
cmd = f"mintty -h never {bin_path} -c {self.cfg_dir}"
else:
cmd = f"nohup {bin_path} -c {self.cfg_path} > /dev/null 2>&1 & "
if self.remoteIP:
self.remote_exec(self.taosadapter_cfg_dict, f"tAdapter.deployed=1\ntAdapter.log_dir={self.log_dir}\ntAdapter.cfg_dir={self.cfg_dir}\ntAdapter.start()")
self.running = 1
else:
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
tdLog.debug(f"taosadapter is running with {cmd} " )
time.sleep(0.1)
def stop(self, force_kill=False):
signal = "-SIGKILL" if force_kill else "-SIGTERM"
if self.remoteIP:
self.remote_exec(self.taosadapter_cfg_dict, "tAdapter.running=1\ntAdapter.stop()")
tdLog.info("stop taosadapter")
return
toBeKilled = "taosadapter"
if self.running != 0:
psCmd = f"ps -ef|grep -w {toBeKilled}| grep -v grep | awk '{{print $2}}'"
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = f"kill {signal} {processID} > /dev/null 2>&1"
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
if not platform.system().lower() == 'windows':
for port in range(6030, 6041):
fuserCmd = f"fuser -k -n tcp {port} > /dev/null"
os.system(fuserCmd)
self.running = 0
tdLog.debug(f"taosadapter is stopped by kill {signal}")
tAdapter = TAdapter()
\ No newline at end of file
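For context, a minimal driver for the TAdapter helper above might look like the sketch below. The import path, the overridden config key/value, and the test body are assumptions for illustration only; deploy()/start()/stop() are the methods shown in this file.

# Hypothetical usage of the tAdapter singleton defined above.
# Assumptions: the module is importable as util.taosadapter and the adapter
# config dict exposes a top-level "port" key (start() reads it above).
from util.taosadapter import tAdapter  # assumed import path

def run_with_adapter():
    tAdapter.deploy({"port": 6041})      # writes sim/dnode1/cfg/taosadapter.toml
    tAdapter.start()                     # launches the binary and probes the port
    try:
        pass                             # REST / schemaless test cases would go here
    finally:
        tAdapter.stop(force_kill=False)  # SIGTERM; pass True to send SIGKILL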
......@@ -87,18 +87,17 @@
./test.sh -f tsim/parser/alter_column.sim
./test.sh -f tsim/parser/alter_stable.sim
./test.sh -f tsim/parser/alter.sim
# nojira ./test.sh -f tsim/parser/alter1.sim
# jira ./test.sh -f tsim/parser/alter1.sim
./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim
# jira ./test.sh -f tsim/parser/auto_create_tb.sim
./test.sh -f tsim/parser/between_and.sim
./test.sh -f tsim/parser/binary_escapeCharacter.sim
# nojira ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# nojira ./test.sh -f tsim/parser/columnValue.sim
# jira ./test.sh -f tsim/parser/col_arithmetic_operation.sim
# jira ./test.sh -f tsim/parser/columnValue.sim
## ./test.sh -f tsim/parser/commit.sim
## ./test.sh -f tsim/parser/condition.sim
## ./test.sh -f tsim/parser/constCol.sim
# ./test.sh -f tsim/parser/create_db.sim
## ./test.sh -f tsim/parser/create_db__for_community_version.sim
./test.sh -f tsim/parser/create_db.sim
# ./test.sh -f tsim/parser/create_mt.sim
# ./test.sh -f tsim/parser/create_tb.sim
## ./test.sh -f tsim/parser/create_tb_with_tag_name.sim
......@@ -235,15 +234,15 @@
./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
# ./test.sh -f tsim/stream/distributesession0.sim
./test.sh -f tsim/stream/distributeSession0.sim
./test.sh -f tsim/stream/session0.sim
./test.sh -f tsim/stream/session1.sim
./test.sh -f tsim/stream/state0.sim
./test.sh -f tsim/stream/triggerInterval0.sim
# ./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/partitionby.sim
./test.sh -f tsim/stream/partitionby1.sim
# ./test.sh -f tsim/stream/schedSnode.sim
# unsupport ./test.sh -f tsim/stream/schedSnode.sim
./test.sh -f tsim/stream/windowClose.sim
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/sliding.sim
......@@ -294,12 +293,12 @@
./test.sh -f tsim/db/basic3.sim -m
./test.sh -f tsim/db/error1.sim -m
./test.sh -f tsim/insert/backquote.sim -m
# nojira ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
# unsupport ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
./test.sh -f tsim/query/interval-offset.sim -m
./test.sh -f tsim/tmq/basic3.sim -m
./test.sh -f tsim/stable/vnode3.sim -m
./test.sh -f tsim/qnode/basic1.sim -m
# nojira ./test.sh -f tsim/mnode/basic1.sim -m
# unsupport ./test.sh -f tsim/mnode/basic1.sim -m
# --- sma
./test.sh -f tsim/sma/drop_sma.sim
......
......@@ -23,10 +23,10 @@ sql create database $db
sql use $db
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data00 != $db then
if $data20 != $db then
return -1
endi
sql drop database $db
......@@ -38,10 +38,10 @@ sql CREATE DATABASE $db
sql use $db
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data00 != $db then
if $data20 != $db then
return -1
endi
sql drop database $db
......@@ -87,7 +87,7 @@ print create_db.sim case4: db_already_exists
sql create database db0
sql create database db0
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
sql drop database db0
......@@ -107,29 +107,21 @@ $ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db wal $wal comp $comp
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data00 != $db then
if $data20 != $db then
return -1
endi
if $data04 != $replica then
if $data24 != $replica then
return -1
endi
if $data06 != $duration then
if $data26 != 14400m then
return -1
endi
if $data07 != 365,365,365 then
return -1
endi
print data08 = $data07
if $data08 != $cache then
print expect $cache, actual:$data08
return -1
endi
if $data09 != 4 then
if $data27 != 525600m,525600m,525600m then
return -1
endi
......@@ -160,56 +152,56 @@ sql_error create database $db keep 12,11
sql_error create database $db keep 365001,365001,365001
sql create database dbk0 keep 19
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 19,19,19 then
if $data27 != 27360m,27360m,27360m then
return -1
endi
sql drop database dbk0
sql create database dbka keep 19,20
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 19,20,20 then
if $data27 != 27360m,28800m,28800m then
return -1
endi
sql drop database dbka
sql create database dbk1 keep 11,11,11
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,11,11 then
if $data27 != 15840m,15840m,15840m then
return -1
endi
sql drop database dbk1
sql create database dbk2 keep 11,12,13
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,12,13 then
if $data27 != 15840m,17280m,18720m then
return -1
endi
sql drop database dbk2
sql create database dbk3 keep 11,11,13
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,11,13 then
if $data27 != 15840m,15840m,18720m then
return -1
endi
sql drop database dbk3
sql create database dbk4 keep 11,13,13
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
if $data07 != 11,13,13 then
if $data27 != 15840m,18720m,18720m then
return -1
endi
sql drop database dbk4
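The rewritten expectations in this hunk reflect that `show databases` now reports duration/keep values in minutes rather than days: duration 10 shows as 14400m, keep 365 as 525600m, keep 19 as 27360m, and so on (days × 1440). A minimal sketch of the conversion these checks rely on, in plain Python with an illustrative helper name:

# Convert a keep/duration value given in days to the "...m" form that the
# updated checks above expect (1 day = 1440 minutes).
def days_to_minutes_str(days: int) -> str:
    return f"{days * 1440}m"

assert days_to_minutes_str(19) == "27360m"
assert days_to_minutes_str(365) == "525600m"
assert days_to_minutes_str(13) == "18720m"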
......@@ -233,38 +225,31 @@ sql_error create database $db ctime 29
sql_error create database $db ctime 40961
# wal {0, 2}
sql create database testwal wal 0
sql_error create database testwal wal 0
sql show databases
if $rows != 1 then
if $rows != 2 then
return -1
endi
sql show databases
print wallevel $data12_testwal
if $data12_testwal != 0 then
return -1
endi
sql drop database testwal
sql create database testwal wal 1
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
sql show databases
print wallevel $data12_testwal
if $data12_testwal != 1 then
print wallevel $data13_testwal
if $data13_testwal != 1 then
return -1
endi
sql drop database testwal
sql create database testwal wal 2
sql show databases
if $rows != 1 then
if $rows != 3 then
return -1
endi
print wallevel $data12_testwal
if $data12_testwal != 2 then
print wallevel $data13_testwal
if $data13_testwal != 2 then
return -1
endi
sql drop database testwal
......@@ -278,7 +263,7 @@ sql_error create database $db comp 3
sql_error drop database $db
sql show databases
if $rows != 0 then
if $rows != 2 then
return -1
endi
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
print ======================== dnode1 start
$dbPrefix = fi_in_db
$tbPrefix = fi_in_tb
$mtPrefix = fi_in_mt
$tbNum = 10
$rowNum = 20
$totalNum = 200
print executing test script create_db.sim
print =============== set up
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
sql_error createdatabase $db
sql create database $db
sql use $db
sql show databases
if $rows != 1 then
return -1
endi
if $data00 != $db then
return -1
endi
sql drop database $db
# case1: case_insensitivity test
print =========== create_db.sim case1: case insensitivity test
sql_error CREATEDATABASE $db
sql CREATE DATABASE $db
sql use $db
sql show databases
if $rows != 1 then
return -1
endi
if $data00 != $db then
return -1
endi
sql drop database $db
print case_insensitivity test passed
# case2: illegal_db_name test
print =========== create_db.sim case2: illegal_db_name test
$illegal_db1 = 1db
$illegal_db2 = d@b
sql_error create database $illegal_db1
sql_error create database $illegal_db2
print illegal_db_name test passed
# case3: chinese_char_in_db_name test
print ========== create_db.sim case3: chinese_char_in_db_name test
$CN_db1 = 数据库
$CN_db2 = 数据库1
$CN_db3 = db数据库1
sql_error create database $CN_db1
sql_error create database $CN_db2
sql_error create database $CN_db3
#sql show databases
#if $rows != 3 then
# return -1
#endi
#if $data00 != $CN_db1 then
# return -1
#endi
#if $data10 != $CN_db2 then
# return -1
#endi
#if $data20 != $CN_db3 then
# return -1
#endi
#sql drop database $CN_db1
#sql drop database $CN_db2
#sql drop database $CN_db3
print case_chinese_char_in_db_name test passed
# case4: db_already_exists
print create_db.sim case4: db_already_exists
sql create database db0
sql create database db0
sql show databases
if $rows != 1 then
return -1
endi
sql drop database db0
print db_already_exists test passed
# case5: db_meta_data
print create_db.sim case5: db_meta_data test
# cfg params
$replica = 1 # max=3
$duration = 10
$keep = 365
$rows_db = 1000
$cache = 16 # 16MB
$ablocks = 100
$tblocks = 32 # max=512, automatically trimmed when exceeding
$ctime = 36000 # 10 hours
$wal = 1 # valid value is 1, 2
$comp = 1 # max=32, automatically trimmed when exceeding
sql create database $db replica $replica duration $duration keep $keep maxrows $rows_db cache $cache blocks 4 ctime $ctime wal $wal comp $comp
sql show databases
if $rows != 1 then
return -1
endi
if $data00 != $db then
return -1
endi
if $data04 != $replica then
return -1
endi
if $data06 != $duration then
return -1
endi
if $data07 != 365 then
return -1
endi
print data08 = $data07
if $data08 != $cache then
print expect $cache, actual:$data08
return -1
endi
if $data09 != 4 then
return -1
endi
sql drop database $db
## param range tests
# replica [1,3]
#sql_error create database $db replica 0
sql_error create database $db replica 4
# day [1, 3650]
sql_error create database $db day 0
sql_error create database $db day 3651
# keep [1, infinity]
sql_error create database $db keep 0
sql_error create database $db keep 0,0,0
sql_error create database $db keep 3,3,3
sql_error create database $db keep 3
sql_error create database $db keep 11.0
sql_error create database $db keep 11.0,11.0,11.0
sql_error create database $db keep "11","11","11"
sql_error create database $db keep "11"
sql_error create database $db keep 13,12,11
sql_error create database $db keep 11,12,11
sql_error create database $db keep 12,11,12
sql_error create database $db keep 11,12,13
sql_error create database $db keep 11,12,13,14
sql_error create database $db keep 11,11
sql_error create database $db keep 365001,365001,365001
sql_error create database $db keep 365001
sql create database dbk1 keep 11
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 11 then
return -1
endi
sql drop database dbk1
sql create database dbk2 keep 12
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 12 then
return -1
endi
sql drop database dbk2
sql create database dbk3 keep 11
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 11 then
return -1
endi
sql drop database dbk3
sql create database dbk4 keep 13
sql show databases
if $rows != 1 then
return -1
endi
if $data07 != 13 then
return -1
endi
sql drop database dbk4
#sql_error create database $db keep 3651
# rows [200, 10000]
sql_error create database $db maxrows 199
#sql_error create database $db maxrows 10001
# cache [100, 10485760]
sql_error create database $db cache 0
#sql_error create database $db cache 10485761
# blocks [32, 4096], overwritten by 4096 if exceeded (note added: 2018-10-24)
#sql_error create database $db tblocks 31
#sql_error create database $db tblocks 4097
# ctime [30, 40960]
sql_error create database $db ctime 29
sql_error create database $db ctime 40961
# wal {0, 2}
#sql_error create database $db wal 0
sql_error create database $db wal -1
sql_error create database $db wal 3
# comp {0, 1, 2}
sql_error create database $db comp -1
sql_error create database $db comp 3
sql_error drop database $db
sql show databases
if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
......@@ -4,23 +4,17 @@ system sh/exec.sh -n dnode1 -s start
sql connect
print ======================== dnode1 start
$db = testdb
sql create database $db
sql use $db
sql create stable st2 (ts timestamp, f1 int) tags (id int, t1 int, t2 nchar(4), t3 double)
sql insert into tb1 using st2 (id, t1) tags(1,2) values (now, 1)
sql select id,t1,t2,t3 from tb1
if $rows != 1 then
return -1
endi
if $data00 != 1 then
return -1
endi
......@@ -35,124 +29,101 @@ if $data03 != NULL then
endi
sql create table tb2 using st2 (t2,t3) tags ("12",22.0)
sql select id,t1,t2,t3 from tb2;
if $rows != 1 then
sql show tags from tb2
if $rows != 4 then
return -1
endi
if $data00 != NULL then
if $data05 != NULL then
return -1
endi
if $data01 != NULL then
if $data15 != NULL then
return -1
endi
if $data02 != 12 then
if $data25 != 12 then
return -1
endi
if $data03 != 22.000000000 then
if $data35 != 22.000000000 then
return -1
endi
sql create table tb3 using st2 tags (1,2,"3",33.0);
sql select id,t1,t2,t3 from tb3;
if $rows != 1 then
sql show tags from tb3;
if $rows != 4 then
return -1
endi
if $data00 != 1 then
if $data05 != 1 then
return -1
endi
if $data01 != 2 then
if $data15 != 2 then
return -1
endi
if $data02 != 3 then
if $data25 != 3 then
return -1
endi
if $data03 != 33.000000000 then
if $data35 != 33.000000000 then
return -1
endi
sql insert into tb4 using st2 tags(1,2,"33",44.0) values (now, 1);
sql select id,t1,t2,t3 from tb4;
if $rows != 1 then
sql show tags from tb4;
if $rows != 4 then
return -1
endi
if $data00 != 1 then
if $data05 != 1 then
return -1
endi
if $data01 != 2 then
if $data15 != 2 then
return -1
endi
if $data02 != 33 then
if $data25 != 33 then
return -1
endi
if $data03 != 44.000000000 then
if $data35 != 44.000000000 then
return -1
endi
sql_error create table tb5 using st2() tags (3,3,"3",33.0);
sql_error create table tb6 using st2 (id,t1) tags (3,3,"3",33.0);
sql_error create table tb7 using st2 (id,t1) tags (3);
sql_error create table tb8 using st2 (ide) tags (3);
sql_error create table tb9 using st2 (id);
sql_error create table tb10 using st2 (id t1) tags (1,1);
sql_error create table tb10 using st2 (id,,t1) tags (1,1,1);
sql_error create table tb11 using st2 (id,t1,) tags (1,1,1);
sql create table tb12 using st2 (t1,id) tags (2,1);
sql select id,t1,t2,t3 from tb12;
if $rows != 1 then
sql show tags from tb12;
if $rows != 5 then
return -1
endi
if $data00 != 1 then
if $data05 != 1 then
return -1
endi
if $data01 != 2 then
if $data15 != 2 then
return -1
endi
if $data02 != NULL then
if $data25 != NULL then
return -1
endi
if $data03 != NULL then
if $data35 != NULL then
return -1
endi
sql create table tb13 using st2 ("t1",'id') tags (2,1);
sql select id,t1,t2,t3 from tb13;
if $rows != 1 then
sql show tags from tb13;
if $rows != 2 then
return -1
endi
if $data00 != 1 then
if $data05 != 1 then
return -1
endi
if $data01 != 2 then
if $data15 != 2 then
return -1
endi
if $data02 != NULL then
if $data25 != NULL then
return -1
endi
if $data03 != NULL then
if $data35 != NULL then
return -1
endi
......
......@@ -51,8 +51,6 @@ while $i < $half
$i = $i + 1
endw
sleep 100
$i = 1
$tb = $tbPrefix . $i
......@@ -300,8 +298,6 @@ while $i < 1
endw
system sh/exec.sh -n dnode1 -s stop -x SIGINT
sleep 500
system sh/exec.sh -n dnode1 -s start
sql_error select * from wh_mt0 where c3 = 'abc' and tbname in ('test_null_filter');
......@@ -349,7 +345,6 @@ if $data01 != 2 then
return -1
endi
sql insert into where_ts values(now, 5);
sleep 10
sql select * from (select * from where_ts) where ts<now;
if $rows != 5 then
return -1
......
......@@ -472,7 +472,8 @@ sql create table t2 using st tags(2,2,2);
sql create table t3 using st tags(2,2,2);
sql create table t4 using st tags(2,2,2);
sql create table t5 using st tags(2,2,2);
sql create stream streams2 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3,max(b) c4 from st partition by tbname interval(10s)
sql create stream streams2 trigger at_once into streamt as select _wstart, count(*) c1, sum(a) c3,max(b) c4 from st partition by tbname interval(10s);
sql create stream streams3 trigger at_once into streamt3 as select _wstart, count(*) c1, sum(a) c3,max(b) c4, now c5 from st partition by tbname interval(10s);
sql insert into t1 values(1648791213000,1,1,1,1.0) t2 values(1648791213000,2,2,2,2.0) t3 values(1648791213000,3,3,3,3.0) t4 values(1648791213000,4,4,4,4.0);
......@@ -571,4 +572,20 @@ if $data02 != 8 then
goto loop2
endi
$loop_count = 0
loop3:
sleep 300
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
sql select count(*) from streamt3;
# row 0
if $data00 != 5 then
print =====data00=$data00
goto loop3
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
......@@ -9,31 +9,41 @@ from util.dnodes import *
PRIMARY_COL = "ts"
INT_COL = "c_int"
BINT_COL = "c_bint"
SINT_COL = "c_sint"
TINT_COL = "c_tint"
FLOAT_COL = "c_float"
DOUBLE_COL = "c_double"
BOOL_COL = "c_bool"
TINT_UN_COL = "c_tint_un"
SINT_UN_COL = "c_sint_un"
BINT_UN_COL = "c_bint_un"
INT_UN_COL = "c_int_un"
BINARY_COL = "c_binary"
NCHAR_COL = "c_nchar"
TS_COL = "c_ts"
NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
INT_COL = "c_int"
BINT_COL = "c_bint"
SINT_COL = "c_sint"
TINT_COL = "c_tint"
FLOAT_COL = "c_float"
DOUBLE_COL = "c_double"
BOOL_COL = "c_bool"
TINT_UN_COL = "c_utint"
SINT_UN_COL = "c_usint"
BINT_UN_COL = "c_ubint"
INT_UN_COL = "c_uint"
BINARY_COL = "c_binary"
NCHAR_COL = "c_nchar"
TS_COL = "c_ts"
NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, TINT_UN_COL, SINT_UN_COL, BINT_UN_COL, INT_UN_COL]
CHAR_COL = [BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [BOOL_COL, ]
TS_TYPE_COL = [TS_COL, ]
INT_TAG = "t_int"
ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL]
TAG_COL = [INT_TAG]
## insert data args:
TIME_STEP = 10000
NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
# init db/table
DBNAME = "db"
STBNAME = "stb1"
CTBNAME = "ct1"
NTBNAME = "nt1"
@dataclass
class DataSet:
ts_data : List[int] = field(default_factory=list)
......@@ -152,29 +162,31 @@ class TDTestCase:
self.test_create_databases()
self.test_create_stb()
def __create_tb(self):
def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, rsma=False):
tdLog.printNoPrefix("==========step: create table")
create_stb_sql = f'''create table stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
create_stb_sql = f'''create table {stb}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
)
) tags ({INT_TAG} int)
'''
for i in range(ntbnum):
create_ntb_sql = f'''create table nt{i+1}(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp,
{TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned,
{INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned
)
'''
tdSql.execute(create_stb_sql)
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
for i in range(ctb_num):
tdSql.execute(f'create table ct{i+1} using {stb} tags ( {i+1} )')
def __data_set(self, rows):
data_set = DataSet()
......@@ -220,7 +232,7 @@ class TDTestCase:
tdSql.execute( f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )" )
tdSql.execute( f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )" )
tdSql.execute( f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )" )
tdSql.execute( f"insert into t1 values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )" )
tdSql.execute( f"insert into {NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )" )
tdSql.execute( f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )" )
tdSql.execute( f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )" )
......@@ -230,9 +242,9 @@ class TDTestCase:
tdSql.execute( f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )" )
tdSql.execute( f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )" )
tdSql.execute( f"insert into t1 values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )" )
tdSql.execute( f"insert into t1 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" )
tdSql.execute( f"insert into t1 values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" )
tdSql.execute( f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )" )
tdSql.execute( f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" )
tdSql.execute( f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" )
def run(self):
......
......@@ -325,10 +325,17 @@ class TDTestCase:
def __sma_create_check(self, sma:SMAschema):
if self.updatecfgDict["querySmaOptimize"] == 0:
return False
# TODO: if database is a rollup-db, can not create sma index
# tdSql.query("select database()")
# if sma.rollup_db :
# return False
tdSql.query("select database()")
dbname = tdSql.getData(0,0)
tdSql.query("show databases")
for row in tdSql.queryResult:
if row[0] == dbname:
if row[-1] is None:
continue
if ":" in row[-1]:
sma.rollup_db = True
if sma.rollup_db :
return False
tdSql.query("show stables")
if not sma.tbname:
return False
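The check added above treats a database as a rollup database when its `show databases` row carries a retentions value such as 1s:4m,2s:8m,3s:12m. Pulled out as a standalone helper it might look like the sketch below; like the test, it assumes the retentions column is the last field of the row.

# Sketch of the rollup-database check used above (column position is an
# assumption carried over from the test code).
def is_rollup_db(tdSql) -> bool:
    tdSql.query("select database()")
    dbname = tdSql.getData(0, 0)
    tdSql.query("show databases")
    for row in tdSql.queryResult:
        if row[0] == dbname:
            return row[-1] is not None and ":" in row[-1]
    return False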
......@@ -379,12 +386,15 @@ class TDTestCase:
tdSql.query(self.__create_sma_index(sma))
self.sma_count += 1
self.sma_created_index.append(sma.index_name)
tdSql.query("show streams")
tdSql.query(self.__show_sma_index(sma))
tdSql.checkRows(self.sma_count)
tdSql.checkData(0, 2, sma.tbname)
else:
tdSql.error(self.__create_sma_index(sma))
def __drop_sma_index(self, sma:SMAschema):
sql = f"{sma.drop} {sma.drop_flag} {sma.index_name}"
return sql
......@@ -402,12 +412,12 @@ class TDTestCase:
def sma_drop_check(self, sma:SMAschema):
if self.__sma_drop_check(sma):
tdSql.query(self.__drop_sma_index(sma))
print(self.__drop_sma_index(sma))
self.sma_count -= 1
self.sma_created_index = list(filter(lambda x: x != sma.index_name, self.sma_created_index))
tdSql.query("show streams")
tdSql.checkRows(self.sma_count)
else:
tdSql.error(self.__drop_sma_index(sma))
......@@ -614,20 +624,20 @@ class TDTestCase:
self.__insert_data()
self.all_test()
#tdLog.printNoPrefix("==========step2:create table in rollup database")
#tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
#tdSql.execute("use db3")
# self.__create_tb()
#tdSql.execute(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL}) ")
#self.all_test()
# self.__insert_data()
tdLog.printNoPrefix("==========step2:create table in rollup database")
tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m")
tdSql.execute("use db3")
tdSql.execute(f"create stable stb1 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL}) ")
self.all_test()
tdSql.execute("drop database if exists db1 ")
tdSql.execute("drop database if exists db2 ")
tdDnodes.stop(1)
tdDnodes.start(1)
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdSql.execute("flush database db ")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
......
......@@ -20,12 +20,13 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),logSql)
tdSql.init(conn.cursor(),False)
self.rowNum = 10
self.ts = 1537146000000
self.setsql = TDSetSql()
self.ntbname = 'ntb'
self.stbname = 'stb'
self.dbname = "db"
self.ntbname = f"{self.dbname}.ntb"
self.stbname = f'{self.dbname}.stb'
self.binary_length = 20 # the length of binary for column_dict
self.nchar_length = 20 # the length of nchar for column_dict
self.column_dict = {
......
......@@ -13,190 +13,195 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method
def run(self):
dbname = "db"
stb = f"{dbname}.stb1"
rows = 10
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
'''create table if not exists supt
f'''create table if not exists {stb}
(ts timestamp, c1 int, c2 float, c3 bigint, c4 double, c5 smallint, c6 tinyint)
tags(location binary(64), type int, isused bool , family nchar(64))'''
)
tdSql.execute("create table t1 using supt tags('beijing', 1, 1, 'nchar1')")
tdSql.execute("create table t2 using supt tags('shanghai', 2, 0, 'nchar2')")
tdSql.execute(f"create table {dbname}.t1 using {stb} tags('beijing', 1, 1, 'nchar1')")
tdSql.execute(f"create table {dbname}.t2 using {stb} tags('shanghai', 2, 0, 'nchar2')")
tdLog.printNoPrefix("==========step2:insert data")
for i in range(10):
for i in range(rows):
tdSql.execute(
f"insert into t1 values (now()+{i}m, {32767+i}, {20.0+i/10}, {2**31+i}, {3.4*10**38+i/10}, {127+i}, {i})"
f"insert into {dbname}.t1 values (now()+{i}m, {32767+i}, {20.0+i/10}, {2**31+i}, {3.4*10**38+i/10}, {127+i}, {i})"
)
tdSql.execute(
f"insert into t2 values (now()-{i}m, {-32767-i}, {20.0-i/10}, {-i-2**31}, {-i/10-3.4*10**38}, {-127-i}, {-i})"
f"insert into {dbname}.t2 values (now()-{i}m, {-32767-i}, {20.0-i/10}, {-i-2**31}, {-i/10-3.4*10**38}, {-127-i}, {-i})"
)
tdSql.execute(
f"insert into t1 values (now()+11m, {2**31-1}, {pow(10,37)*34}, {pow(2,63)-1}, {1.7*10**308}, 32767, 127)"
f"insert into {dbname}.t1 values (now()+11m, {2**31-1}, {pow(10,37)*34}, {pow(2,63)-1}, {1.7*10**308}, 32767, 127)"
)
tdSql.execute(
f"insert into t2 values (now()-11m, {1-2**31}, {-3.4*10**38}, {1-2**63}, {-1.7*10**308}, -32767, -127)"
f"insert into {dbname}.t2 values (now()-11m, {1-2**31}, {-3.4*10**38}, {1-2**63}, {-1.7*10**308}, -32767, -127)"
)
tdSql.execute(
f"insert into t2 values (now()-12m, null , {-3.4*10**38}, null , {-1.7*10**308}, null , null)"
f"insert into {dbname}.t2 values (now()-12m, null , {-3.4*10**38}, null , {-1.7*10**308}, null , null)"
)
tdLog.printNoPrefix("==========step3:query timestamp type")
tdSql.query("select * from t1 where ts between now()-1m and now()+10m")
tdSql.checkRows(10)
tdSql.query("select * from t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
tdSql.query(f"select * from {dbname}.t1 where ts between now()-1m and now()+10m")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where ts between '2021-01-01 00:00:00.000' and '2121-01-01 00:00:00.000'")
# tdSql.checkRows(11)
tdSql.query("select * from t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
tdSql.query(f"select * from {dbname}.t1 where ts between '1969-01-01 00:00:00.000' and '1969-12-31 23:59:59.999'")
# tdSql.checkRows(0)
tdSql.query("select * from t1 where ts between -2793600 and 31507199")
tdSql.query(f"select * from {dbname}.t1 where ts between -2793600 and 31507199")
tdSql.checkRows(0)
tdSql.query("select * from t1 where ts between 1609430400000 and 4765104000000")
tdSql.checkRows(11)
tdSql.query(f"select * from {dbname}.t1 where ts between 1609430400000 and 4765104000000")
tdSql.checkRows(rows+1)
tdLog.printNoPrefix("==========step4:query int type")
tdSql.query("select * from t1 where c1 between 32767 and 32776")
tdSql.checkRows(10)
tdSql.query("select * from t1 where c1 between 32766.9 and 32776.1")
tdSql.checkRows(10)
tdSql.query("select * from t1 where c1 between 32776 and 32767")
tdSql.query(f"select * from {dbname}.t1 where c1 between 32767 and 32776")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where c1 between 32766.9 and 32776.1")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where c1 between 32776 and 32767")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c1 between 'a' and 'e'")
tdSql.query(f"select * from {dbname}.t1 where c1 between 'a' and 'e'")
tdSql.checkRows(0)
# tdSql.query("select * from t1 where c1 between 0x64 and 0x69")
# tdSql.query("select * from {dbname}.t1 where c1 between 0x64 and 0x69")
# tdSql.checkRows(6)
tdSql.query("select * from t1 where c1 not between 100 and 106")
tdSql.checkRows(11)
tdSql.query(f"select * from t1 where c1 between {2**31-2} and {2**31+1}")
tdSql.query(f"select * from {dbname}.t1 where c1 not between 100 and 106")
tdSql.checkRows(rows+1)
tdSql.query(f"select * from {dbname}.t1 where c1 between {2**31-2} and {2**31+1}")
tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c1 between null and {1-2**31}")
tdSql.query(f"select * from {dbname}.t2 where c1 between null and {1-2**31}")
# tdSql.checkRows(3)
tdSql.query(f"select * from t2 where c1 between {-2**31} and {1-2**31}")
tdSql.query(f"select * from {dbname}.t2 where c1 between {-2**31} and {1-2**31}")
tdSql.checkRows(1)
tdLog.printNoPrefix("==========step5:query float type")
tdSql.query("select * from t1 where c2 between 20.0 and 21.0")
tdSql.query(f"select * from {dbname}.t1 where c2 between 20.0 and 21.0")
tdSql.checkRows(10)
tdSql.query(f"select * from t1 where c2 between {-3.4*10**38-1} and {3.4*10**38+1}")
tdSql.checkRows(11)
tdSql.query("select * from t1 where c2 between 21.0 and 20.0")
tdSql.query(f"select * from {dbname}.t1 where c2 between {-3.4*10**38-1} and {3.4*10**38+1}")
tdSql.checkRows(rows+1)
tdSql.query(f"select * from {dbname}.t1 where c2 between 21.0 and 20.0")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c2 between 'DC3' and 'SYN'")
tdSql.query(f"select * from {dbname}.t1 where c2 between 'DC3' and 'SYN'")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c2 not between 0.1 and 0.2")
tdSql.checkRows(11)
tdSql.query(f"select * from t1 where c2 between {pow(10,38)*3.4} and {pow(10,38)*3.4+1}")
tdSql.query(f"select * from {dbname}.t1 where c2 not between 0.1 and 0.2")
tdSql.checkRows(rows+1)
tdSql.query(f"select * from {dbname}.t1 where c2 between {pow(10,38)*3.4} and {pow(10,38)*3.4+1}")
# tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c2 between {-3.4*10**38-1} and {-3.4*10**38}")
tdSql.query(f"select * from {dbname}.t2 where c2 between {-3.4*10**38-1} and {-3.4*10**38}")
# tdSql.checkRows(2)
tdSql.query(f"select * from t2 where c2 between null and {-3.4*10**38}")
tdSql.query(f"select * from {dbname}.t2 where c2 between null and {-3.4*10**38}")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step6:query bigint type")
tdSql.query(f"select * from t1 where c3 between {2**31} and {2**31+10}")
tdSql.checkRows(10)
tdSql.query(f"select * from t1 where c3 between {-2**63} and {2**63}")
tdSql.checkRows(11)
tdSql.query(f"select * from t1 where c3 between {2**31+10} and {2**31}")
tdSql.query(f"select * from {dbname}.t1 where c3 between {2**31} and {2**31+10}")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where c3 between {-2**63} and {2**63}")
tdSql.checkRows(rows+1)
tdSql.query(f"select * from {dbname}.t1 where c3 between {2**31+10} and {2**31}")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c3 between 'a' and 'z'")
tdSql.query(f"select * from {dbname}.t1 where c3 between 'a' and 'z'")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c3 not between 1 and 2")
tdSql.query(f"select * from {dbname}.t1 where c3 not between 1 and 2")
# tdSql.checkRows(0)
tdSql.query(f"select * from t1 where c3 between {2**63-2} and {2**63-1}")
tdSql.query(f"select * from {dbname}.t1 where c3 between {2**63-2} and {2**63-1}")
tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c3 between {-2**63} and {1-2**63}")
tdSql.query(f"select * from {dbname}.t2 where c3 between {-2**63} and {1-2**63}")
# tdSql.checkRows(3)
tdSql.query(f"select * from t2 where c3 between null and {1-2**63}")
tdSql.query(f"select * from {dbname}.t2 where c3 between null and {1-2**63}")
# tdSql.checkRows(2)
tdLog.printNoPrefix("==========step7:query double type")
tdSql.query(f"select * from t1 where c4 between {3.4*10**38} and {3.4*10**38+10}")
tdSql.checkRows(10)
tdSql.query(f"select * from t1 where c4 between {1.7*10**308+1} and {1.7*10**308+2}")
tdSql.query(f"select * from {dbname}.t1 where c4 between {3.4*10**38} and {3.4*10**38+10}")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where c4 between {1.7*10**308+1} and {1.7*10**308+2}")
# Due to double-precision limits, values beyond the bigint boundary cannot be compared exactly
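# e.g. 1.7e308 + 1 == 1.7e308 in IEEE-754 double precision, so the upper-boundary filter above may still match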
# tdSql.checkRows(0)
tdSql.query(f"select * from t1 where c4 between {3.4*10**38+10} and {3.4*10**38}")
tdSql.query(f"select * from {dbname}.t1 where c4 between {3.4*10**38+10} and {3.4*10**38}")
# tdSql.checkRows(0)
tdSql.query("select * from t1 where c4 between 'a' and 'z'")
tdSql.query(f"select * from {dbname}.t1 where c4 between 'a' and 'z'")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c4 not between 1 and 2")
tdSql.query(f"select * from {dbname}.t1 where c4 not between 1 and 2")
# tdSql.checkRows(0)
tdSql.query(f"select * from t1 where c4 between {1.7*10**308} and {1.7*10**308+1}")
tdSql.query(f"select * from {dbname}.t1 where c4 between {1.7*10**308} and {1.7*10**308+1}")
tdSql.checkRows(1)
tdSql.query(f"select * from t2 where c4 between {-1.7*10**308-1} and {-1.7*10**308}")
tdSql.query(f"select * from {dbname}.t2 where c4 between {-1.7*10**308-1} and {-1.7*10**308}")
# tdSql.checkRows(3)
tdSql.query(f"select * from t2 where c4 between null and {-1.7*10**308}")
tdSql.query(f"select * from {dbname}.t2 where c4 between null and {-1.7*10**308}")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step8:query smallint type")
tdSql.query("select * from t1 where c5 between 127 and 136")
tdSql.checkRows(10)
tdSql.query("select * from t1 where c5 between 126.9 and 135.9")
tdSql.checkRows(9)
tdSql.query("select * from t1 where c5 between 136 and 127")
tdSql.query(f"select * from {dbname}.t1 where c5 between 127 and 136")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where c5 between 126.9 and 135.9")
tdSql.checkRows(rows-1)
tdSql.query(f"select * from {dbname}.t1 where c5 between 136 and 127")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c5 between '~' and '^'")
tdSql.query(f"select * from {dbname}.t1 where c5 between '~' and '^'")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c5 not between 1 and 2")
tdSql.query(f"select * from {dbname}.t1 where c5 not between 1 and 2")
# tdSql.checkRows(0)
tdSql.query("select * from t1 where c5 between 32767 and 32768")
tdSql.query(f"select * from {dbname}.t1 where c5 between 32767 and 32768")
tdSql.checkRows(1)
tdSql.query("select * from t2 where c5 between -32768 and -32767")
tdSql.query(f"select * from {dbname}.t2 where c5 between -32768 and -32767")
tdSql.checkRows(1)
tdSql.query("select * from t2 where c5 between null and -32767")
tdSql.query(f"select * from {dbname}.t2 where c5 between null and -32767")
# tdSql.checkRows(1)
tdLog.printNoPrefix("==========step9:query tinyint type")
tdSql.query("select * from t1 where c6 between 0 and 9")
tdSql.checkRows(10)
tdSql.query("select * from t1 where c6 between -1.1 and 8.9")
tdSql.checkRows(9)
tdSql.query("select * from t1 where c6 between 9 and 0")
tdSql.query(f"select * from {dbname}.t1 where c6 between 0 and 9")
tdSql.checkRows(rows)
tdSql.query(f"select * from {dbname}.t1 where c6 between -1.1 and 8.9")
tdSql.checkRows(rows-1)
tdSql.query(f"select * from {dbname}.t1 where c6 between 9 and 0")
tdSql.checkRows(0)
tdSql.query("select * from t1 where c6 between 'NUL' and 'HT'")
tdSql.query(f"select * from {dbname}.t1 where c6 between 'NUL' and 'HT'")
tdSql.checkRows(1)
tdSql.query("select * from t1 where c6 not between 1 and 2")
tdSql.query(f"select * from {dbname}.t1 where c6 not between 1 and 2")
# tdSql.checkRows(1)
tdSql.query("select * from t1 where c6 between 127 and 128")
tdSql.query(f"select * from {dbname}.t1 where c6 between 127 and 128")
tdSql.checkRows(1)
tdSql.query("select * from t2 where c6 between -128 and -127")
tdSql.query(f"select * from {dbname}.t2 where c6 between -128 and -127")
tdSql.checkRows(1)
tdSql.query("select * from t2 where c6 between null and -127")
tdSql.query(f"select * from {dbname}.t2 where c6 between null and -127")
# tdSql.checkRows(3)
tdLog.printNoPrefix("==========step10:invalid query type")
# TODO tag is not finished
# tdSql.query("select * from supt where location between 'beijing' and 'shanghai'")
# tdSql.checkRows(23)
# # Non-zero values are all parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
# tdSql.query("select * from supt where isused between 0 and 1")
# tdSql.checkRows(23)
# tdSql.query("select * from supt where isused between -1 and 0")
# tdSql.checkRows(0)
# tdSql.error("select * from supt where isused between false and true")
# tdSql.query("select * from supt where family between '拖拉机' and '自行车'")
# tdSql.checkRows(23)
tdSql.query(f"select * from {stb} where location between 'beijing' and 'shanghai'")
tdSql.checkRows(rows * 2 + 3)
# Non-zero values are all parsed as 1, so "between <negative value> and 0" is parsed as "between 1 and 0"
tdSql.query(f"select * from {stb} where isused between 0 and 1")
tdSql.checkRows(rows * 2 + 3)
tdSql.query(f"select * from {stb} where isused between -1 and 0")
tdSql.checkRows(rows + 2)
tdSql.query(f"select * from {stb} where isused between false and true")
tdSql.checkRows(rows * 2 + 3)
tdSql.query(f"select * from {stb} where family between '拖拉机' and '自行车'")
tdSql.checkRows(0)
tdLog.printNoPrefix("==========step11:query HEX/OCT/BIN type")
tdSql.error("select * from t1 where c6 between 0x7f and 0x80") # check filter HEX
tdSql.error("select * from t1 where c6 between 0b1 and 0b11111") # check filter BIN
tdSql.error("select * from t1 where c6 between 0b1 and 0x80")
tdSql.error("select * from t1 where c6=0b1")
tdSql.error("select * from t1 where c6=0x1")
tdSql.error(f"select * from {dbname}.t1 where c6 between 0x7f and 0x80") # check filter HEX
tdSql.error(f"select * from {dbname}.t1 where c6 between 0b1 and 0b11111") # check filter BIN
tdSql.error(f"select * from {dbname}.t1 where c6 between 0b1 and 0x80")
tdSql.error(f"select * from {dbname}.t1 where c6=0b1")
tdSql.error(f"select * from {dbname}.t1 where c6=0x1")
# Octal-looking literals are evaluated as decimal values
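# e.g. "between 01 and 0200" below is read as "between 1 and 200", matching c6 in 1..9 plus 127 (10 rows)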
tdSql.query("select * from t1 where c6 between 01 and 0200") # check filter OCT
tdSql.checkRows(10)
tdSql.query(f"select * from {dbname}.t1 where c6 between 01 and 0200") # check filter OCT
tdSql.checkRows(rows)
def stop(self):
tdSql.close()
......
......@@ -26,7 +26,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.dbname = 'db_test'
self.setsql = TDSetSql()
self.ntbname = 'ntb'
self.ntbname = f'{self.dbname}.ntb'
self.rowNum = 10
self.tbnum = 20
self.ts = 1537146000000
......@@ -96,7 +96,7 @@ class TDTestCase:
self.bottom_check_data(self.ntbname,'normal_table')
tdSql.execute(f'drop database {self.dbname}')
def bottom_check_stb(self):
stbname = tdCom.getLongName(5, "letters")
stbname = f'{self.dbname}.{tdCom.getLongName(5, "letters")}'
tag_dict = {
't0':'int'
}
......@@ -109,7 +109,7 @@ class TDTestCase:
for i in range(self.tbnum):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)
tdSql.query('show tables')
tdSql.query(f'show {self.dbname}.tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
vgroup_list.append(tdSql.queryResult[i][6])
......
......@@ -5,7 +5,6 @@ import json
from dataclasses import dataclass, field
from typing import List, Any, Tuple
from certifi import where
from util.log import tdLog
from util.sql import tdSql
from util.cases import tdCases
......
......@@ -169,13 +169,13 @@ class TDTestCase:
tdSql.checkData(0,1,self.row_nums)
tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1")
tdSql.checkRows(72)
tdSql.checkRows(90)
tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1")
tdSql.checkRows(72)
tdSql.checkRows(90)
tdSql.query("select c1 , csum(c1) from stb partition by c1")
tdSql.checkRows(80)
tdSql.checkRows(100)
tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
tdSql.checkRows(21)
......@@ -191,7 +191,7 @@ class TDTestCase:
tdSql.checkData(0,1,None)
tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
tdSql.checkRows(72)
tdSql.checkRows(90)
# bug need fix
# tdSql.checkData(0,1,None)
......
......@@ -184,7 +184,7 @@ class TDTestCase:
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
# queryString = "select ts, c1, c2 from %s.%s "%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' and t5 == 'shanghai' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
......