Commit ae55ce30 authored by Ganlin Zhao

Merge branch '3.0' into fix/TD-16877

@@ -174,6 +174,8 @@ cmake .. -G "NMake Makefiles"
nmake
```
+If you use Visual Studio 2022, the default installation path of the script `vcvarsall.bat` is `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`.
### Mac OS X
Install the Xcode command line tools and cmake. On Catalina and Big Sur, XCode 11.4+ is required.
......
@@ -136,7 +136,7 @@ cmake .. -DCPUTYPE=mips64 && cmake --build .
### On Windows platform
-If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
+If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```cmd
mkdir debug && cd debug
@@ -145,7 +145,7 @@ cmake .. -G "NMake Makefiles"
nmake
```
-If you use the Visual Studio 2019 or 2017:
+If you use Visual Studio 2019 or 2017:
please open a command window by executing "cmd.exe".
Please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
@@ -164,6 +164,8 @@ cmake .. -G "NMake Makefiles"
nmake
```
+If you use Visual Studio 2022, the only change is the default path of `vcvarsall.bat`, which is `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`.
### On Mac OS X platform
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
......
@@ -2687,11 +2687,13 @@ int32_t tDeserializeSDbCfgRsp(void *buf, int32_t bufLen, SDbCfgRsp *pRsp) {
  if (tDecodeI8(&decoder, &pRsp->strict) < 0) return -1;
  if (tDecodeI8(&decoder, &pRsp->cacheLastRow) < 0) return -1;
  if (tDecodeI32(&decoder, &pRsp->numOfRetensions) < 0) return -1;
+  if (pRsp->numOfRetensions > 0) {
    pRsp->pRetensions = taosArrayInit(pRsp->numOfRetensions, sizeof(SRetention));
    if (pRsp->pRetensions == NULL) {
      terrno = TSDB_CODE_OUT_OF_MEMORY;
      return -1;
    }
+  }
  for (int32_t i = 0; i < pRsp->numOfRetensions; ++i) {
    SRetention rentension = {0};
......
@@ -83,8 +83,8 @@ int32_t smaBegin(SSma *pSma) {
/**
 * @brief pre-commit for rollup sma.
 *  1) set trigger stat of rsma timer TASK_TRIGGER_STAT_PAUSED.
- *  2) perform persist task for qTaskInfo
- *  3) wait all triggered fetch tasks finished
+ *  2) wait all triggered fetch tasks finished
+ *  3) perform persist task for qTaskInfo
 *
 * @param pSma
 * @return int32_t
@@ -102,10 +102,7 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
  // step 1: set persistence task paused
  atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
-  // step 2: perform persist task for qTaskInfo
-  tdRSmaPersistExecImpl(pRSmaStat);
-  // step 3: wait all triggered fetch tasks finished
+  // step 2: wait all triggered fetch tasks finished
  int32_t nLoops = 0;
  while (1) {
    if (T_REF_VAL_GET(pStat) == 0) {
@@ -121,6 +118,9 @@ static int32_t tdProcessRSmaPreCommitImpl(SSma *pSma) {
    }
  }
+  // step 3: perform persist task for qTaskInfo
+  tdRSmaPersistExecImpl(pRSmaStat);
  smaDebug("vgId:%d, rsma pre commit succeess", SMA_VID(pSma));
  return TSDB_CODE_SUCCESS;
......
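The two hunks above reorder the rsma pre-commit so that the persist step runs only after every already-triggered fetch task has drained. A minimal, self-contained C sketch of that ordering follows; standard C11 atomics stand in for the taos helpers (atomic_store_8, T_REF_VAL_GET, tdRSmaPersistExecImpl), so the names below are illustrative, not TDengine APIs.

```c
// Illustrative sketch only; not the TDengine implementation.
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

enum { TRIGGER_ACTIVE = 1, TRIGGER_PAUSED = 2 };

static atomic_int trigger_stat = TRIGGER_ACTIVE;  // stands in for RSMA_TRIGGER_STAT(pRSmaStat)
static atomic_int fetch_task_refs = 0;            // stands in for T_REF_VAL_GET(pStat)

static void persist_qtaskinfo(void) {             // stands in for tdRSmaPersistExecImpl()
  printf("persist qTaskInfo\n");
}

static int pre_commit(void) {
  // step 1: pause the trigger so no new fetch tasks are started
  atomic_store(&trigger_stat, TRIGGER_PAUSED);

  // step 2: wait until all already-triggered fetch tasks have finished
  while (atomic_load(&fetch_task_refs) != 0) {
    sched_yield();
  }

  // step 3: persist only after the tasks have drained, so the persisted
  // qTaskInfo cannot miss results from an in-flight fetch
  persist_qtaskinfo();
  return 0;
}

int main(void) { return pre_commit(); }
```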
@@ -2843,11 +2843,18 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
  int32_t type = pOperator->operatorType;
+  pOperator->status = OP_OPENED;
  if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
    SStreamBlockScanInfo* pScanInfo = pOperator->info;
    pScanInfo->blockType = STREAM_INPUT__DATA_SCAN;
+    pScanInfo->pSnapshotReadOp->status = OP_OPENED;
    STableScanInfo* pInfo = pScanInfo->pSnapshotReadOp->info;
+    ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER);
    if (uid == 0) {
      pInfo->noTable = 1;
      return TSDB_CODE_SUCCESS;
@@ -2861,14 +2868,6 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
    pInfo->noTable = 0;
    if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
-      tsdbSetTableId(pInfo->dataReader, uid);
-      int64_t oldSkey = pInfo->cond.twindows[0].skey;
-      pInfo->cond.twindows[0].skey = ts + 1;
-      tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0);
-      pInfo->cond.twindows[0].skey = oldSkey;
-      pInfo->scanTimes = 0;
-      pInfo->curTWinIdx = 0;
      SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
      int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
@@ -2880,8 +2879,17 @@ int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
          pInfo->currentTable = i;
        }
      }
-      // TODO after processing drop,
+      // TODO after processing drop, found can be false
      ASSERT(found);
+      tsdbSetTableId(pInfo->dataReader, uid);
+      int64_t oldSkey = pInfo->cond.twindows[0].skey;
+      pInfo->cond.twindows[0].skey = ts + 1;
+      tsdbResetReadHandle(pInfo->dataReader, &pInfo->cond, 0);
+      pInfo->cond.twindows[0].skey = oldSkey;
+      pInfo->scanTimes = 0;
+      pInfo->curTWinIdx = 0;
      qDebug("tsdb reader offset seek to uid %ld ts %ld, table cur set to %d , all table num %d", uid, ts,
             pInfo->currentTable, tableSz);
    }
......
@@ -784,29 +784,18 @@ static int32_t parseBoolFromValueNode(STranslateContext* pCxt, SValueNode* pVal)
  }
}
-static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt) {
-  uint8_t precision = getPrecisionFromCurrStmt(pCxt->pCurrStmt, targetDt.precision);
-  pVal->node.resType.precision = precision;
-  if (pVal->placeholderNo > 0 || pVal->isNull) {
-    return DEAL_RES_CONTINUE;
-  }
-  if (TSDB_DATA_TYPE_NULL == pVal->node.resType.type) {
-    // TODO
-    // pVal->node.resType = targetDt;
-    pVal->translate = true;
-    pVal->isNull = true;
-    return DEAL_RES_CONTINUE;
-  }
-  if (pVal->isDuration) {
-    if (parseNatualDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit, precision) !=
-        TSDB_CODE_SUCCESS) {
+static EDealRes translateDurationValue(STranslateContext* pCxt, SValueNode* pVal) {
+  if (parseNatualDuration(pVal->literal, strlen(pVal->literal), &pVal->datum.i, &pVal->unit,
+                          pVal->node.resType.precision) != TSDB_CODE_SUCCESS) {
    return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
  }
  *(int64_t*)&pVal->typeData = pVal->datum.i;
-  } else {
+  return DEAL_RES_CONTINUE;
+}
+
+static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt, bool strict) {
+  int32_t code = TSDB_CODE_SUCCESS;
  switch (targetDt.type) {
+    case TSDB_DATA_TYPE_NULL:
+      break;
    case TSDB_DATA_TYPE_BOOL:
      if (TSDB_CODE_SUCCESS != parseBoolFromValueNode(pCxt, pVal)) {
        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
@@ -814,42 +803,66 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
      *(bool*)&pVal->typeData = pVal->datum.b;
      break;
    case TSDB_DATA_TYPE_TINYINT: {
-      pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
+      code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_TINYINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(int8_t*)&pVal->typeData = pVal->datum.i;
      break;
    }
    case TSDB_DATA_TYPE_SMALLINT: {
-      pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
+      code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_SMALLINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(int16_t*)&pVal->typeData = pVal->datum.i;
      break;
    }
    case TSDB_DATA_TYPE_INT: {
-      pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
+      code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_INT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(int32_t*)&pVal->typeData = pVal->datum.i;
      break;
    }
    case TSDB_DATA_TYPE_BIGINT: {
-      pVal->datum.i = taosStr2Int64(pVal->literal, NULL, 10);
+      code = toInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.i);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_BIGINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(int64_t*)&pVal->typeData = pVal->datum.i;
      break;
    }
    case TSDB_DATA_TYPE_UTINYINT: {
-      pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
+      code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UTINYINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(uint8_t*)&pVal->typeData = pVal->datum.u;
      break;
    }
    case TSDB_DATA_TYPE_USMALLINT: {
-      pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
+      code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_USMALLINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(uint16_t*)&pVal->typeData = pVal->datum.u;
      break;
    }
    case TSDB_DATA_TYPE_UINT: {
-      pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
+      code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(uint32_t*)&pVal->typeData = pVal->datum.u;
      break;
    }
    case TSDB_DATA_TYPE_UBIGINT: {
-      pVal->datum.u = taosStr2UInt64(pVal->literal, NULL, 10);
+      code = toUInteger(pVal->literal, strlen(pVal->literal), 10, &pVal->datum.u);
+      if (strict && (TSDB_CODE_SUCCESS != code || !IS_VALID_UBIGINT(pVal->datum.i))) {
+        return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
+      }
      *(uint64_t*)&pVal->typeData = pVal->datum.u;
      break;
    }
@@ -901,10 +914,33 @@ static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SD
    default:
      break;
  }
-  }
-  pVal->node.resType = targetDt;
-  pVal->translate = true;
-  return DEAL_RES_CONTINUE;
-}
+  return DEAL_RES_CONTINUE;
+}
+
+static EDealRes translateValueImpl(STranslateContext* pCxt, SValueNode* pVal, SDataType targetDt, bool strict) {
+  if (pVal->placeholderNo > 0 || pVal->isNull) {
+    return DEAL_RES_CONTINUE;
+  }
+  if (TSDB_DATA_TYPE_NULL == pVal->node.resType.type) {
+    // TODO
+    // pVal->node.resType = targetDt;
+    pVal->translate = true;
+    pVal->isNull = true;
+    return DEAL_RES_CONTINUE;
+  }
+  pVal->node.resType.precision = getPrecisionFromCurrStmt(pCxt->pCurrStmt, targetDt.precision);
+  EDealRes res = DEAL_RES_CONTINUE;
+  if (pVal->isDuration) {
+    res = translateDurationValue(pCxt, pVal);
+  } else {
+    res = translateNormalValue(pCxt, pVal, targetDt, strict);
+  }
+  pVal->node.resType = targetDt;
+  pVal->translate = true;
+  return res;
+}
static int32_t calcTypeBytes(SDataType dt) {
@@ -920,7 +956,7 @@ static int32_t calcTypeBytes(SDataType dt) {
static EDealRes translateValue(STranslateContext* pCxt, SValueNode* pVal) {
  SDataType dt = pVal->node.resType;
  dt.bytes = calcTypeBytes(dt);
-  return translateValueImpl(pCxt, pVal, dt);
+  return translateValueImpl(pCxt, pVal, dt, false);
}
static bool isMultiResFunc(SNode* pNode) {
@@ -3216,13 +3252,30 @@ static bool validRollupFunc(const char* pFunc) {
  return false;
}
-static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs) {
+static int32_t checkTableRollupOption(STranslateContext* pCxt, SNodeList* pFuncs, bool createStable,
+                                      SDbCfgInfo* pDbCfg) {
  if (NULL == pFuncs) {
+    if (NULL != pDbCfg->pRetensions) {
+      return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
+                                     "To create a super table in a database with the retensions parameter configured, "
+                                     "the 'ROLLUP' option must be present");
+    }
    return TSDB_CODE_SUCCESS;
  }
-  if (1 != LIST_LENGTH(pFuncs) || !validRollupFunc(((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName)) {
-    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION);
+  if (!createStable || NULL == pDbCfg->pRetensions) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
+                                   "Invalid option rollup: Only supported for create super table in databases "
+                                   "configured with the 'RETENTIONS' option");
+  }
+  if (1 != LIST_LENGTH(pFuncs)) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION,
+                                   "Invalid option rollup: only one function is allowed");
+  }
+  const char* pFunc = ((SFunctionNode*)nodesListGetNode(pFuncs, 0))->functionName;
+  if (!validRollupFunc(pFunc)) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROLLUP_OPTION,
+                                   "Invalid option rollup: %s function is not supported", pFunc);
  }
  return TSDB_CODE_SUCCESS;
}
@@ -3248,8 +3301,8 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN
      code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
    }
    if (TSDB_CODE_SUCCESS == code) {
-      if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_BINARY_LEN) ||
-          (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && pTag->dataType.bytes > TSDB_MAX_NCHAR_LEN)) {
+      if ((TSDB_DATA_TYPE_VARCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_BINARY_LEN) ||
+          (TSDB_DATA_TYPE_NCHAR == pTag->dataType.type && calcTypeBytes(pTag->dataType) > TSDB_MAX_NCHAR_LEN)) {
        code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN);
      }
    }
@@ -3270,11 +3323,11 @@ static int32_t checkTableTagsSchema(STranslateContext* pCxt, SHashObj* pHash, SN
  return code;
}
-static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, SNodeList* pCols) {
+static int32_t checkTableColsSchema(STranslateContext* pCxt, SHashObj* pHash, int32_t ntags, SNodeList* pCols) {
  int32_t ncols = LIST_LENGTH(pCols);
  if (ncols < TSDB_MIN_COLUMNS) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COLUMNS_NUM);
-  } else if (ncols > TSDB_MAX_COLUMNS) {
+  } else if (ncols + ntags > TSDB_MAX_COLUMNS) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
  }
@@ -3330,7 +3383,7 @@ static int32_t checkTableSchema(STranslateContext* pCxt, SCreateTableStmt* pStmt
  int32_t code = checkTableTagsSchema(pCxt, pHash, pStmt->pTags);
  if (TSDB_CODE_SUCCESS == code) {
-    code = checkTableColsSchema(pCxt, pHash, pStmt->pCols);
+    code = checkTableColsSchema(pCxt, pHash, LIST_LENGTH(pStmt->pTags), pStmt->pCols);
  }
  taosHashCleanup(pHash);
@@ -3359,11 +3412,18 @@ static int32_t getTableMaxDelayOption(STranslateContext* pCxt, SValueNode* pVal,
                            pMaxDelay);
}
-static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions* pOptions) {
+static int32_t checkTableMaxDelayOption(STranslateContext* pCxt, STableOptions* pOptions, bool createStable,
+                                        SDbCfgInfo* pDbCfg) {
  if (NULL == pOptions->pMaxDelay) {
    return TSDB_CODE_SUCCESS;
  }
+  if (!createStable || NULL == pDbCfg->pRetensions) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
+                                   "Invalid option maxdelay: Only supported for create super table in databases "
+                                   "configured with the 'RETENTIONS' option");
+  }
  if (LIST_LENGTH(pOptions->pMaxDelay) > 2) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "maxdelay");
  }
@@ -3382,11 +3442,18 @@ static int32_t getTableWatermarkOption(STranslateContext* pCxt, SValueNode* pVal
                            pMaxDelay);
}
-static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* pOptions) {
+static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions* pOptions, bool createStable,
+                                         SDbCfgInfo* pDbCfg) {
  if (NULL == pOptions->pWatermark) {
    return TSDB_CODE_SUCCESS;
  }
+  if (!createStable || NULL == pDbCfg->pRetensions) {
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION,
+                                   "Invalid option watermark: Only supported for create super table in databases "
+                                   "configured with the 'RETENTIONS' option");
+  }
  if (LIST_LENGTH(pOptions->pWatermark) > 2) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TABLE_OPTION, "watermark");
  }
@@ -3400,13 +3467,20 @@ static int32_t checkTableWatermarkOption(STranslateContext* pCxt, STableOptions*
  return code;
}
-static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
-  int32_t code = checkTableMaxDelayOption(pCxt, pStmt->pOptions);
+static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt, bool createStable) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  SDbCfgInfo dbCfg = {0};
+  if (createStable) {
+    code = getDBCfg(pCxt, pStmt->dbName, &dbCfg);
+  }
  if (TSDB_CODE_SUCCESS == code) {
-    code = checkTableWatermarkOption(pCxt, pStmt->pOptions);
+    code = checkTableMaxDelayOption(pCxt, pStmt->pOptions, createStable, &dbCfg);
  }
  if (TSDB_CODE_SUCCESS == code) {
-    code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs);
+    code = checkTableWatermarkOption(pCxt, pStmt->pOptions, createStable, &dbCfg);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = checkTableRollupOption(pCxt, pStmt->pOptions->pRollupFuncs, createStable, &dbCfg);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = checkTableSmaOption(pCxt, pStmt);
@@ -3685,7 +3759,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm
static int32_t translateCreateSuperTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
  SMCreateStbReq createReq = {0};
-  int32_t code = checkCreateTable(pCxt, pStmt);
+  int32_t code = checkCreateTable(pCxt, pStmt, true);
  if (TSDB_CODE_SUCCESS == code) {
    code = buildCreateStbReq(pCxt, pStmt, &createReq);
  }
@@ -4265,6 +4339,39 @@ static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) {
  tNameGetFullDbName(&name, pDbFName);
}
+static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) {
+  SSelectStmt* pSelect = (SSelectStmt*)pStmt;
+  SNode* pProj = nodesListGetNode(pSelect->pProjectionList, 0);
+  if (NULL == pSelect->pWindow ||
+      (QUERY_NODE_FUNCTION == nodeType(pProj) && 0 == strcmp("_wstartts", ((SFunctionNode*)pProj)->functionName))) {
+    return TSDB_CODE_SUCCESS;
+  }
+  SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
+  if (NULL == pFunc) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  strcpy(pFunc->functionName, "_wstartts");
+  strcpy(pFunc->node.aliasName, pFunc->functionName);
+  int32_t code = nodesListPushFront(pSelect->pProjectionList, (SNode*)pFunc);
+  if (TSDB_CODE_SUCCESS != code) {
+    nodesDestroyNode((SNode*)pFunc);
+  }
+  return code;
+}
+
+static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SCMCreateStreamReq* pReq) {
+  pCxt->createStream = true;
+  int32_t code = addWstartTsToCreateStreamQuery(pStmt);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = translateQuery(pCxt, pStmt);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    getSourceDatabase(pStmt, pCxt->pParseCxt->acctId, pReq->sourceDB);
+    code = nodesNodeToString(pStmt, false, &pReq->ast, NULL);
+  }
+  return code;
+}
+
static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
  pReq->igExists = pStmt->ignoreExists;
@@ -4278,13 +4385,7 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
    tNameExtractFullName(&name, pReq->targetStbFullName);
  }
-  pCxt->createStream = true;
-  int32_t code = translateQuery(pCxt, pStmt->pQuery);
-  if (TSDB_CODE_SUCCESS == code) {
-    getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB);
-    code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
-  }
+  int32_t code = buildCreateStreamQuery(pCxt, pStmt->pQuery, pReq);
  if (TSDB_CODE_SUCCESS == code) {
    pReq->sql = strdup(pCxt->pParseCxt->pSql);
    if (NULL == pReq->sql) {
@@ -5233,7 +5334,7 @@ static int32_t buildCreateTableDataBlock(int32_t acctId, const SCreateTableStmt*
static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
  SCreateTableStmt* pStmt = (SCreateTableStmt*)pQuery->pRoot;
-  int32_t code = checkCreateTable(pCxt, pStmt);
+  int32_t code = checkCreateTable(pCxt, pStmt, false);
  SVgroupInfo info = {0};
  if (TSDB_CODE_SUCCESS == code) {
    code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &info);
@@ -5337,7 +5438,7 @@ static int32_t createTagValFromVal(STranslateContext* pCxt, SDataType targetDt,
  if (NULL == *pVal) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }
-  return DEAL_RES_ERROR == translateValueImpl(pCxt, *pVal, targetDt) ? pCxt->errCode : TSDB_CODE_SUCCESS;
+  return DEAL_RES_ERROR == translateValueImpl(pCxt, *pVal, targetDt, true) ? pCxt->errCode : TSDB_CODE_SUCCESS;
}
static int32_t createTagVal(STranslateContext* pCxt, uint8_t precision, SSchema* pSchema, SNode* pNode,
@@ -5721,7 +5822,7 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
  }
  SDataType targetDt = schemaToDataType(pTableMeta->tableInfo.precision, pSchema);
-  if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt)) {
+  if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) {
    return pCxt->errCode;
  }
......
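The parTranslater.c hunks above repeat one guard for every integer width: parse the literal, then, when strict checking is requested, reject the node if parsing failed or the value does not fit the target type. A small standalone C sketch of that pattern follows; it uses strtoll and INT8 limits in place of the taos toInteger/IS_VALID_TINYINT helpers, a substitution made only so the example compiles on its own.

```c
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Illustrative only: mirrors the "parse, then range-check when strict" shape
// used case by case in translateNormalValue above (TINYINT shown here).
static bool parse_tinyint(const char *literal, bool strict, int8_t *out) {
  errno = 0;
  char   *end = NULL;
  int64_t v = strtoll(literal, &end, 10);
  bool    ok = (errno == 0 && end != literal && *end == '\0');
  if (strict && (!ok || v < INT8_MIN || v > INT8_MAX)) {
    return false;  // plays the role of generateDealNodeErrMsg(..., TSDB_CODE_PAR_WRONG_VALUE_TYPE, ...)
  }
  *out = (int8_t)v;  // same narrowing store as *(int8_t*)&pVal->typeData = pVal->datum.i
  return true;
}

int main(void) {
  int8_t v = 0;
  printf("%d\n", parse_tinyint("127", true, &v) ? v : -1);   // accepted: 127
  printf("%d\n", parse_tinyint("300", true, &v) ? v : -1);   // strict: rejected, prints -1
  printf("%d\n", parse_tinyint("300", false, &v) ? v : -1);  // non-strict: stored with wrap-around, like the old path
  return 0;
}
```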
@@ -159,7 +159,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
 * c4 | column | DOUBLE | 8 |
 * c5 | column | DOUBLE | 8 |
 */
-void generateTestT1(MockCatalogService* mcs) {
+void generateTestTables(MockCatalogService* mcs) {
  ITableBuilder& builder = mcs->createTableBuilder("test", "t1", TSDB_NORMAL_TABLE, 6)
                               .setPrecision(TSDB_TIME_PRECISION_MILLI)
                               .setVgid(1)
@@ -183,8 +183,18 @@ void generateTestT1(MockCatalogService* mcs) {
 * tag2 | tag | VARCHAR | 20 |
 * tag3 | tag | TIMESTAMP | 8 |
 * Child Table: st1s1, st1s2
+ *
+ * Super Table: st2
+ * Field | Type | DataType | Bytes |
+ * ==========================================================================
+ * ts | column | TIMESTAMP | 8 |
+ * c1 | column | INT | 4 |
+ * c2 | column | VARCHAR | 20 |
+ * jtag | tag | json | -- |
+ * Child Table: st2s1, st2s2
 */
-void generateTestST1(MockCatalogService* mcs) {
+void generateTestStables(MockCatalogService* mcs) {
+  {
    ITableBuilder& builder = mcs->createTableBuilder("test", "st1", TSDB_SUPER_TABLE, 3, 3)
                                 .setPrecision(TSDB_TIME_PRECISION_MILLI)
                                 .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP)
@@ -197,19 +207,8 @@ void generateTestST1(MockCatalogService* mcs) {
    mcs->createSubTable("test", "st1", "st1s1", 1);
    mcs->createSubTable("test", "st1", "st1s2", 2);
    mcs->createSubTable("test", "st1", "st1s3", 1);
  }
-/*
- * Super Table: st2
- * Field | Type | DataType | Bytes |
- * ==========================================================================
- * ts | column | TIMESTAMP | 8 |
- * c1 | column | INT | 4 |
- * c2 | column | VARCHAR | 20 |
- * jtag | tag | json | -- |
- * Child Table: st2s1, st2s2
- */
-void generateTestST2(MockCatalogService* mcs) {
+  {
    ITableBuilder& builder = mcs->createTableBuilder("test", "st2", TSDB_SUPER_TABLE, 3, 1)
                                 .setPrecision(TSDB_TIME_PRECISION_MILLI)
                                 .addColumn("ts", TSDB_DATA_TYPE_TIMESTAMP)
@@ -219,6 +218,7 @@ void generateTestST2(MockCatalogService* mcs) {
    builder.done();
    mcs->createSubTable("test", "st2", "st2s1", 1);
    mcs->createSubTable("test", "st2", "st2s2", 2);
+  }
}
void generateFunctions(MockCatalogService* mcs) {
@@ -233,6 +233,11 @@ void generateDnodes(MockCatalogService* mcs) {
  mcs->createDnode(3, "host3", 7030);
}
+void generateDatabases(MockCatalogService* mcs) {
+  mcs->createDatabase("test");
+  mcs->createDatabase("rollup_db", true);
+}
+
} // namespace
int32_t __catalogGetHandle(const char* clusterId, struct SCatalog** catalogHandle) { return 0; }
@@ -262,7 +267,7 @@ int32_t __catalogGetDBVgInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char
}
int32_t __catalogGetDBCfg(SCatalog* pCtg, SRequestConnInfo* pConn, const char* dbFName, SDbCfgInfo* pDbCfg) {
-  return 0;
+  return g_mockCatalogService->catalogGetDBCfg(dbFName, pDbCfg);
}
int32_t __catalogChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
@@ -359,11 +364,11 @@ void initMetaDataEnv() {
}
void generateMetaData() {
+  generateDatabases(g_mockCatalogService.get());
  generateInformationSchema(g_mockCatalogService.get());
  generatePerformanceSchema(g_mockCatalogService.get());
-  generateTestT1(g_mockCatalogService.get());
-  generateTestST1(g_mockCatalogService.get());
-  generateTestST2(g_mockCatalogService.get());
+  generateTestTables(g_mockCatalogService.get());
+  generateTestStables(g_mockCatalogService.get());
  generateFunctions(g_mockCatalogService.get());
  generateDnodes(g_mockCatalogService.get());
  g_mockCatalogService->showTables();
......
@@ -140,6 +140,17 @@ class MockCatalogServiceImpl {
    return TSDB_CODE_SUCCESS;
  }
+  int32_t catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const {
+    std::string dbFName(pDbFName);
+    DbCfgCache::const_iterator it = dbCfg_.find(dbFName.substr(std::string(pDbFName).find_last_of('.') + 1));
+    if (dbCfg_.end() == it) {
+      return TSDB_CODE_FAILED;
+    }
+    memcpy(pDbCfg, &(it->second), sizeof(SDbCfgInfo));
+    return TSDB_CODE_SUCCESS;
+  }
+
  int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
    auto it = udf_.find(funcName);
    if (udf_.end() == it) {
@@ -323,12 +334,21 @@ class MockCatalogServiceImpl {
    dnode_.insert(std::make_pair(dnodeId, epSet));
  }
+  void createDatabase(const std::string& db, bool rollup) {
+    SDbCfgInfo cfg = {0};
+    if (rollup) {
+      cfg.pRetensions = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SRetention));
+    }
+    dbCfg_.insert(std::make_pair(db, cfg));
+  }
+
 private:
  typedef std::map<std::string, std::shared_ptr<MockTableMeta>> TableMetaCache;
  typedef std::map<std::string, TableMetaCache> DbMetaCache;
  typedef std::map<std::string, std::shared_ptr<SFuncInfo>> UdfMetaCache;
  typedef std::map<std::string, std::vector<STableIndexInfo>> IndexMetaCache;
  typedef std::map<int32_t, SEpSet> DnodeCache;
+  typedef std::map<std::string, SDbCfgInfo> DbCfgCache;
  uint64_t getNextId() { return id_++; }
@@ -486,6 +506,7 @@ class MockCatalogServiceImpl {
    for (int32_t i = 0; i < ndbs; ++i) {
      SMetaRes res = {0};
      res.pRes = taosMemoryCalloc(1, sizeof(SDbCfgInfo));
+      res.code = catalogGetDBCfg((const char*)taosArrayGet(pDbCfgReq, i), (SDbCfgInfo*)res.pRes);
      taosArrayPush(*pDbCfgData, &res);
    }
  }
@@ -576,6 +597,7 @@ class MockCatalogServiceImpl {
  UdfMetaCache udf_;
  IndexMetaCache index_;
  DnodeCache dnode_;
+  DbCfgCache dbCfg_;
};
MockCatalogService::MockCatalogService() : impl_(new MockCatalogServiceImpl()) {}
@@ -605,6 +627,8 @@ void MockCatalogService::createDnode(int32_t dnodeId, const std::string& host, i
  impl_->createDnode(dnodeId, host, port);
}
+void MockCatalogService::createDatabase(const std::string& db, bool rollup) { impl_->createDatabase(db, rollup); }
+
int32_t MockCatalogService::catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const {
  return impl_->catalogGetTableMeta(pTableName, pTableMeta);
}
@@ -621,6 +645,10 @@ int32_t MockCatalogService::catalogGetDBVgInfo(const char* pDbFName, SArray** pV
  return impl_->catalogGetDBVgInfo(pDbFName, pVgList);
}
+int32_t MockCatalogService::catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const {
+  return impl_->catalogGetDBCfg(pDbFName, pDbCfg);
+}
+
int32_t MockCatalogService::catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const {
  return impl_->catalogGetUdfInfo(funcName, pInfo);
}
......
@@ -63,11 +63,13 @@ class MockCatalogService {
  void createFunction(const std::string& func, int8_t funcType, int8_t outputType, int32_t outputLen, int32_t bufSize);
  void createSmaIndex(const SMCreateSmaReq* pReq);
  void createDnode(int32_t dnodeId, const std::string& host, int16_t port);
+  void createDatabase(const std::string& db, bool rollup = false);
  int32_t catalogGetTableMeta(const SName* pTableName, STableMeta** pTableMeta) const;
  int32_t catalogGetTableHashVgroup(const SName* pTableName, SVgroupInfo* vgInfo) const;
  int32_t catalogGetTableDistVgInfo(const SName* pTableName, SArray** pVgList) const;
  int32_t catalogGetDBVgInfo(const char* pDbFName, SArray** pVgList) const;
+  int32_t catalogGetDBCfg(const char* pDbFName, SDbCfgInfo* pDbCfg) const;
  int32_t catalogGetUdfInfo(const std::string& funcName, SFuncInfo* pInfo) const;
  int32_t catalogGetTableIndex(const SName* pTableName, SArray** pIndexes) const;
  int32_t catalogGetDnodeList(SArray** pDnodes) const;
......
@@ -38,9 +38,9 @@ TEST_F(ParserInitialATest, alterDnode) {
TEST_F(ParserInitialATest, alterDatabase) {
  useDb("root", "test");
-  run("ALTER DATABASE wxy_db CACHELAST 1 FSYNC 200 WAL 1");
-  run("ALTER DATABASE wxy_db KEEP 2400");
+  run("ALTER DATABASE test CACHELAST 1 FSYNC 200 WAL 1");
+  run("ALTER DATABASE test KEEP 2400");
}
TEST_F(ParserInitialATest, alterLocal) {
......
@@ -359,11 +359,11 @@ TEST_F(ParserInitialCTest, createStable) {
    memset(&expect, 0, sizeof(SMCreateStbReq));
  };
-  auto setCreateStbReqFunc = [&](const char* pTbname, int8_t igExists = 0, int64_t delay1 = -1, int64_t delay2 = -1,
-                                 int64_t watermark1 = TSDB_DEFAULT_ROLLUP_WATERMARK,
+  auto setCreateStbReqFunc = [&](const char* pDbName, const char* pTbName, int8_t igExists = 0, int64_t delay1 = -1,
+                                 int64_t delay2 = -1, int64_t watermark1 = TSDB_DEFAULT_ROLLUP_WATERMARK,
                                 int64_t watermark2 = TSDB_DEFAULT_ROLLUP_WATERMARK,
                                 int32_t ttl = TSDB_DEFAULT_TABLE_TTL, const char* pComment = nullptr) {
-    int32_t len = snprintf(expect.name, sizeof(expect.name), "0.test.%s", pTbname);
+    int32_t len = snprintf(expect.name, sizeof(expect.name), "0.%s.%s", pDbName, pTbName);
    expect.name[len] = '\0';
    expect.igExists = igExists;
    expect.delay1 = delay1;
@@ -454,14 +454,14 @@ TEST_F(ParserInitialCTest, createStable) {
    tFreeSMCreateStbReq(&req);
  });
-  setCreateStbReqFunc("t1");
+  setCreateStbReqFunc("test", "t1");
  addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP);
  addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT);
  addFieldToCreateStbReqFunc(false, "id", TSDB_DATA_TYPE_INT);
  run("CREATE STABLE t1(ts TIMESTAMP, c1 INT) TAGS(id INT)");
  clearCreateStbReq();
-  setCreateStbReqFunc("t1", 1, 100 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_MINUTE, 10,
+  setCreateStbReqFunc("rollup_db", "t1", 1, 100 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_MINUTE, 10,
                      1 * MILLISECOND_PER_MINUTE, 100, "test create table");
  addFieldToCreateStbReqFunc(true, "ts", TSDB_DATA_TYPE_TIMESTAMP, 0, 0);
  addFieldToCreateStbReqFunc(true, "c1", TSDB_DATA_TYPE_INT);
@@ -493,7 +493,7 @@ TEST_F(ParserInitialCTest, createStable) {
  addFieldToCreateStbReqFunc(false, "a13", TSDB_DATA_TYPE_BOOL);
  addFieldToCreateStbReqFunc(false, "a14", TSDB_DATA_TYPE_NCHAR, 30 * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE);
  addFieldToCreateStbReqFunc(false, "a15", TSDB_DATA_TYPE_VARCHAR, 50 + VARSTR_HEADER_SIZE);
-  run("CREATE STABLE IF NOT EXISTS test.t1("
+  run("CREATE STABLE IF NOT EXISTS rollup_db.t1("
      "ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), "
      "c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
      "c13 NCHAR(30), c14 VARCHAR(50)) "
@@ -507,12 +507,13 @@ TEST_F(ParserInitialCTest, createStable) {
TEST_F(ParserInitialCTest, createStableSemanticCheck) {
  useDb("root", "test");
-  run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)", TSDB_CODE_PAR_INVALID_ROLLUP_OPTION);
-  run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m",
+  run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(CEIL)",
+      TSDB_CODE_PAR_INVALID_ROLLUP_OPTION);
+  run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 0s WATERMARK 1m",
      TSDB_CODE_PAR_INVALID_RANGE_OPTION);
-  run("CREATE STABLE stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m",
+  run("CREATE STABLE rollup_db.stb2 (ts TIMESTAMP, c1 INT) TAGS (tag1 INT) ROLLUP(MAX) MAX_DELAY 10s WATERMARK 18m",
      TSDB_CODE_PAR_INVALID_RANGE_OPTION);
}
@@ -561,30 +562,33 @@ TEST_F(ParserInitialCTest, createStream) {
    tFreeSCMCreateStreamReq(&req);
  });
-  setCreateStreamReqFunc("s1", "test", "create stream s1 as select * from t1");
-  run("CREATE STREAM s1 AS SELECT * FROM t1");
+  setCreateStreamReqFunc("s1", "test", "create stream s1 as select count(*) from t1 interval(10s)");
+  run("CREATE STREAM s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
  clearCreateStreamReq();
-  setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select * from t1", nullptr, 1);
-  run("CREATE STREAM IF NOT EXISTS s1 AS SELECT * FROM t1");
+  setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select count(*) from t1 interval(10s)",
+                         nullptr, 1);
+  run("CREATE STREAM IF NOT EXISTS s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
  clearCreateStreamReq();
-  setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select * from t1", "st1");
-  run("CREATE STREAM s1 INTO st1 AS SELECT * FROM t1");
+  setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select count(*) from t1 interval(10s)", "st1");
+  run("CREATE STREAM s1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
  clearCreateStreamReq();
-  setCreateStreamReqFunc(
-      "s1", "test",
-      "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired into st1 as select * from t1",
-      "st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND, 1);
-  run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED INTO st1 AS SELECT * FROM t1");
+  setCreateStreamReqFunc("s1", "test",
+                         "create stream if not exists s1 trigger max_delay 20s watermark 10s ignore expired into st1 "
+                         "as select count(*) from t1 interval(10s)",
+                         "st1", 1, STREAM_TRIGGER_MAX_DELAY, 20 * MILLISECOND_PER_SECOND, 10 * MILLISECOND_PER_SECOND,
+                         1);
+  run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED INTO st1 AS SELECT COUNT(*) "
+      "FROM t1 INTERVAL(10S)");
  clearCreateStreamReq();
}
TEST_F(ParserInitialCTest, createStreamSemanticCheck) {
  useDb("root", "test");
-  run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC);
+  run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC);
}
TEST_F(ParserInitialCTest, createTable) {
@@ -598,7 +602,7 @@ TEST_F(ParserInitialCTest, createTable) {
      "c13 NCHAR(30), c15 VARCHAR(50)) "
      "TTL 100 COMMENT 'test create table' SMA(c1, c2, c3)");
-  run("CREATE TABLE IF NOT EXISTS test.t1("
+  run("CREATE TABLE IF NOT EXISTS rollup_db.t1("
      "ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), "
      "c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
      "c13 NCHAR(30), c14 VARCHAR(50)) "
@@ -617,6 +621,21 @@ TEST_F(ParserInitialCTest, createTable) {
  // run("CREATE TABLE IF NOT EXISTS t1 USING st1 TAGS(1, 'wxy', NOW + 1S)");
}
+TEST_F(ParserInitialCTest, createTableSemanticCheck) {
+  useDb("root", "test");
+
+  string sql = "CREATE TABLE st1(ts TIMESTAMP, ";
+  for (int32_t i = 1; i < 4096; ++i) {
+    if (i > 1) {
+      sql.append(", ");
+    }
+    sql.append("c" + to_string(i) + " INT");
+  }
+  sql.append(") TAGS (t1 int)");
+
+  run(sql, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
+}
+
TEST_F(ParserInitialCTest, createTopic) {
  useDb("root", "test");
......
@@ -1220,6 +1220,7 @@ static int32_t qnodeSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
  if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)qndSplFindSplitNode, &info)) {
    return TSDB_CODE_SUCCESS;
  }
+  ((SScanLogicNode*)info.pSplitNode)->dataRequired = FUNC_DATA_REQUIRED_DATA_LOAD;
  int32_t code = splCreateExchangeNodeForSubplan(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType);
  if (TSDB_CODE_SUCCESS == code) {
    SLogicSubplan* pScanSubplan = splCreateScanSubplan(pCxt, info.pSplitNode, 0);
......
@@ -65,6 +65,7 @@ int32_t syncInit() {
    syncCleanUp();
    ret = -1;
  } else {
+    sDebug("sync rsetId:%" PRId64 " is open", tsNodeRefId);
    ret = syncEnvStart();
  }
}
@@ -77,6 +78,7 @@ void syncCleanUp() {
  ASSERT(ret == 0);
  if (tsNodeRefId != -1) {
+    sDebug("sync rsetId:%" PRId64 " is closed", tsNodeRefId);
    taosCloseRef(tsNodeRefId);
    tsNodeRefId = -1;
  }
@@ -96,6 +98,7 @@ int64_t syncOpen(const SSyncInfo* pSyncInfo) {
    return -1;
  }
+  sDebug("vgId:%d, rid:%" PRId64 " is added to rsetId:%" PRId64, pSyncInfo->vgId, pSyncNode->rid, tsNodeRefId);
  return pSyncNode->rid;
}
@@ -136,13 +139,14 @@ void syncStartStandBy(int64_t rid) {
void syncStop(int64_t rid) {
  SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
-  if (pSyncNode == NULL) {
-    return;
-  }
+  if (pSyncNode == NULL) return;
+
+  int32_t vgId = pSyncNode->vgId;
  syncNodeClose(pSyncNode);
  taosReleaseRef(tsNodeRefId, pSyncNode->rid);
  taosRemoveRef(tsNodeRefId, rid);
+  sDebug("vgId:%d, rid:%" PRId64 " is removed from rsetId:%" PRId64, vgId, rid, tsNodeRefId);
}
int32_t syncSetStandby(int64_t rid) {
......
@@ -164,6 +164,7 @@
./test.sh -f tsim/sma/drop_sma.sim
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
+./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
# --- valgrind
./test.sh -f tsim/valgrind/checkError.sim -v
......
@@ -30,10 +30,10 @@ endw
return
-create database db vgroups 1024 buffer 3;
-use db;
-create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);
-create table ct1 using stb tags(1000);
-create table ct2 using stb tags(1000) ;
-show db.tables;
-insert into ct1 values(now+0s, 10, 2.0, 3.0);
+sql create database db vgroups 1024 buffer 3;
+sql use db;
+sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);
+sql create table ct1 using stb tags(1000);
+sql create table ct2 using stb tags(1000) ;
+sql show db.tables;
+sql insert into ct1 values(now+0s, 10, 2.0, 3.0);
\ No newline at end of file \ No newline at end of file
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 50
sql connect
print =============== create database with retentions
sql create database d0 retentions 5s:7d,5m:21d,15m:365d;
sql use d0
print =============== create super table and register rsma
sql create table if not exists stb (ts timestamp, c1 int, c2 float) tags (city binary(20),district binary(20)) rollup(max) max_delay 5s,5s watermark 2s,3s;
sql show stables
if $rows != 1 then
return -1
endi
print =============== create child table
sql create table ct1 using stb tags("BeiJing", "ChaoYang");
sql show tables
if $rows != 1 then
return -1
endi
print =============== insert data and trigger rollup
sql insert into ct1 values(now, 10, 10.0);
sql insert into ct1 values(now+1s, 1, 1.0);
sql insert into ct1 values(now+2s, 100, 100.0);
print =============== wait maxdelay 5+1 seconds for results
sleep 6000
print =============== select * from retention level 2 from memory
sql select * from ct1;
print $data00 $data01 $data02
if $rows > 2 then
print retention level 2 file rows $rows > 2
return -1
endi
if $data01 != 100 then
if $data01 != 10 then
print retention level 2 file result $data01 != 100 or 10
return -1
endi
endi
print =============== select * from retention level 1 from memory
sql select * from ct1 where ts > now-8d;
print $data00 $data01 $data02
if $rows > 2 then
print retention level 1 file rows $rows > 2
return -1
endi
if $data01 != 100 then
if $data01 != 10 then
print retention level 1 file result $data01 != 100 or 10
return -1
endi
endi
print =============== select * from retention level 0 from memory
sql select * from ct1 where ts > now-3d;
print $data00 $data01 $data02
print $data10 $data11 $data12
print $data20 $data21 $data22
if $rows < 1 then
print retention level 0 file rows $rows < 1
return -1
endi
if $data01 != 10 then
print retention level 0 file result $data01 != 10
return -1
endi
#===================================================================
#==================== reboot to trigger commit data to file
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
print =============== select * from retention level 2 from file
sql select * from ct1;
print $data00 $data01 $data02
if $rows > 2 then
print retention level 2 file rows $rows > 2
return -1
endi
if $data01 != 100 then
if $data01 != 10 then
print retention level 2 file result $data01 != 100 or 10
return -1
endi
endi
print =============== select * from retention level 1 from file
sql select * from ct1 where ts > now-8d;
print $data00 $data01 $data02
if $rows > 2 then
print retention level 1 file rows $rows > 2
return -1
endi
if $data01 != 100 then
if $data01 != 10 then
print retention level 1 file result $data01 != 100 or 10
return -1
endi
endi
print =============== select * from retention level 0 from file
sql select * from ct1 where ts > now-3d;
print $data00 $data01 $data02
print $data10 $data11 $data12
print $data20 $data21 $data22
if $rows < 1 then
print retention level 0 file rows $rows < 1
return -1
endi
if $data01 != 10 then
print retention level 0 file result $data01 != 10
return -1
endi
print =============== insert after rsma qtaskinfo recovery
sql insert into ct1 values(now, 50, 500.0);
sql insert into ct1 values(now+1s, 40, 40.0);
print =============== wait maxdelay 5+1 seconds for results
sleep 6000
print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery
sql select * from ct1;
print $data00 $data01 $data02
if $rows > 2 then
print retention level 2 file/mem rows $rows > 2
return -1
endi
if $data01 != 100 then
if $data01 != 10 then
print retention level 2 file/mem result $data01 != 100 or 10
return -1
endi
endi
if $data02 != 500.00000 then
if $data02 != 100.00000 then
print retention level 2 file/mem result $data02 != 500.00000 or 100.00000
return -1
endi
endi
print =============== select * from retention level 1 from file and memory after rsma qtaskinfo recovery
sql select * from ct1 where ts > now-8d;
print $data00 $data01 $data02
if $rows > 2 then
print retention level 1 file/mem rows $rows > 2
return -1
endi
if $data01 != 100 then
if $data01 != 10 then
print retention level 1 file/mem result $data01 != 100 or 10
return -1
endi
endi
if $data02 != 500.00000 then
if $data02 != 100.00000 then
print retention level 1 file/mem result $data02 != 500.00000 or 100.00000
return -1
endi
endi
print =============== select * from retention level 0 from file and memory after rsma qtaskinfo recovery
sql select * from ct1 where ts > now-3d;
print $data00 $data01 $data02
print $data10 $data11 $data12
print $data20 $data21 $data22
print $data30 $data31 $data32
print $data40 $data41 $data42
if $rows < 1 then
print retention level 0 file/mem rows $rows < 1
return -1
endi
if $data01 != 10 then
print retention level 0 file/mem result $data01 != 10
return -1
endi
if $data11 != 1 then
print retention level 0 file/mem result $data11 != 1
return -1
endi
if $data21 != 100 then
print retention level 0 file/mem result $data21 != 100
return -1
endi
if $data31 != 50 then
print retention level 0 file/mem result $data31 != 50
return -1
endi
if $data32 != 500.00000 then
print retention level 0 file/mem result $data32 != 500.00000
return -1
endi
if $data41 != 40 then
print retention level 0 file/mem result $data41 != 40
return -1
endi
if $data42 != 40.00000 then
print retention level 0 file/mem result $data42 != 40.00000
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
system sh/stop_dnodes.sh system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1 system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start system sh/exec.sh -n dnode1 -s start -v
sql connect sql connect
sql create database d0 vgroups 1; print =============== step1: create drop show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
goto _OVER
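# note: the goto above jumps straight to _OVER, so the user DDL in step2 below is not executed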
print =============== step2: create alter drop show user
sql create user u1 pass 'taosdata'
sql show users
sql alter user u1 sysinfo 1
sql alter user u1 enable 1
sql alter user u1 pass 'taosdata'
sql alter user u2 sysinfo 0
sql drop user u1
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT system sh/exec.sh -n dnode1 -s stop -x SIGINT
...@@ -139,7 +139,7 @@ class TDTestCase: ...@@ -139,7 +139,7 @@ class TDTestCase:
) )
for i in range(4): for i in range(4):
tdSql.execute( tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -36,7 +36,7 @@ class TDTestCase: ...@@ -36,7 +36,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -53,7 +53,7 @@ class TDTestCase: ...@@ -53,7 +53,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -55,7 +55,7 @@ class TDTestCase: ...@@ -55,7 +55,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -55,7 +55,7 @@ class TDTestCase: ...@@ -55,7 +55,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -55,7 +55,7 @@ class TDTestCase: ...@@ -55,7 +55,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -55,7 +55,7 @@ class TDTestCase: ...@@ -55,7 +55,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -64,7 +64,7 @@ class TDTestCase: ...@@ -64,7 +64,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -53,7 +53,7 @@ class TDTestCase: ...@@ -53,7 +53,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -42,7 +42,7 @@ class TDTestCase: ...@@ -42,7 +42,7 @@ class TDTestCase:
) )
for i in range(4): for i in range(4):
tdSql.execute( tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -97,7 +97,7 @@ class TDTestCase: ...@@ -97,7 +97,7 @@ class TDTestCase:
) )
for i in range(4): for i in range(4):
tdSql.execute( tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -687,8 +687,8 @@ class TDTestCase: ...@@ -687,8 +687,8 @@ class TDTestCase:
# to_json() # to_json()
tdSql.query("select to_json('{\"abc\":123}') from jsons1_1") tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
tdSql.checkRows(2) tdSql.checkRows(2)
# tdSql.checkData(0, 0, '{"abc":123}') tdSql.checkData(0, 0, '{"abc":123}')
# tdSql.checkData(1, 0, '{"abc":123}') tdSql.checkData(1, 0, '{"abc":123}')
tdSql.query("select to_json('null') from jsons1_1") tdSql.query("select to_json('null') from jsons1_1")
tdSql.checkRows(2) tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null') tdSql.checkData(0, 0, 'null')
......
...@@ -109,7 +109,7 @@ class TDTestCase: ...@@ -109,7 +109,7 @@ class TDTestCase:
''' '''
) )
for i in range(20): for i in range(20):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9): for i in range(9):
tdSql.execute( tdSql.execute(
......
...@@ -34,7 +34,7 @@ class TDTestCase: ...@@ -34,7 +34,7 @@ class TDTestCase:
) )
for i in range(self.tb_nums): for i in range(self.tb_nums):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
ts = self.ts ts = self.ts
for j in range(self.row_nums): for j in range(self.row_nums):
ts+=j*self.time_step ts+=j*self.time_step
......
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 100000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
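# the filtered query is run directly so its row count becomes the number of rows the
# consumer is expected to receive from topic1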
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
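# consumer properties handed to tmq_sim: consumer group cgrp1, auto commit every 1000 ms,
# and consumption starting from the earliest offset of the topic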
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
firstConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
secondConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
thirdConsumeRows = resultList[0]
if not (totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# total consume
actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
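# --- illustrative sketch (not part of the test suite) -----------------------------------
# The cases above drive TDengine's tmq_sim test consumer. For orientation, a standalone
# consumer for the same "topic1" could look roughly like the snippet below, assuming the
# Kafka-style taos.tmq.Consumer API of recent taospy releases; class and method names may
# differ in the connector version pinned by this repository.
from taos.tmq import Consumer

def consume_topic1():
    consumer = Consumer({
        "group.id": "cgrp1",              # same consumer group name as the test cases
        "enable.auto.commit": "true",
        "auto.offset.reset": "earliest",  # start from the beginning of the topic
    })
    consumer.subscribe(["topic1"])
    total = 0
    try:
        while True:
            msg = consumer.poll(1)        # wait up to 1 second for the next message
            if msg is None:
                break                     # no more data for now
            err = msg.error()
            if err is not None:
                raise err
            for block in msg.value():     # each message carries one or more data blocks
                total += len(block.fetchall())
    finally:
        consumer.close()
    return total
# -----------------------------------------------------------------------------------------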
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 150
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.starttaosd(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 20,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
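# the first consumer stopped after roughly one third of the rows; this second consumer in the
# same group is expected to pick up the remainder, so the two counts should sum to totalRowsInserted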
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted*2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.starttaosd(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 1000000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 15,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(totalRowsInserted * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 25,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(totalRowsInserted)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:500, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
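# the killed consumer had already committed offsets (auto commit every 500 ms, and the test waits
# for the first commit notify), so the replacement consumer below, joining the same group, should
# resume mid-stream and read more than zero but fewer than totalRowsInserted rows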
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 4000
self.rowsPerTbl = 150
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(totalRowsInserted*2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
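# consumers 3 and 4 subscribe to the same topic in one group and run concurrently in a single
# tmq_sim process; together they are expected to consume exactly totalRowsInserted rows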
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(totalRowsInserted)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:300, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
# tdLog.info("wait start consume notify")
# tmqCom.getStartConsumeNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
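        # Assumed intent: build the test database, super table and child tables, insert
        # the interleaved rows, then restart the dnode so the data is flushed to TSDB
        # files and the tmq cases below consume from disk rather than from memory.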
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
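        # Assumed intent of this case (inferred from the checks below): two consumers
        # (ids 3 and 4) subscribe to the same topic in one consumer group; the sum of
        # the rows they consume should equal the total number of inserted rows.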
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
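        # Assumed intent of this case: consume the unfiltered topic with auto-commit
        # enabled, kill the consumer after the first commit notification, then restart
        # a consumer in the same group; it should resume from the committed offset and
        # read more than zero but fewer than the total inserted rows.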
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
...@@ -165,3 +165,10 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
Subproject commit c885e967e490105999b84d009a15168728dfafaf Subproject commit c3815951fc80617ecd171f3743b8b4a4d0bc712e