Commit 3bcab059 authored by haoranc

Merge branch 'main' of https://github.com/taosdata/TDengine into TD-23119

# Contributor Covenant Code of Conduct
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg)](code_of_conduct.md)
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at support@taosdata.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this Code of Conduct, see https://www.contributor-covenant.org/faq
@@ -314,7 +314,7 @@ def pre_test_build_win() {
     cd %WIN_CONNECTOR_ROOT%
     python.exe -m pip install --upgrade pip
     python -m pip uninstall taospy -y
-    python -m pip install taospy==2.7.3
+    python -m pip install taospy==2.7.6
     xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
 '''
 return 1
......
@@ -2,7 +2,7 @@
 # taos-tools
 ExternalProject_Add(taos-tools
   GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
-  GIT_TAG 41d4f95
+  GIT_TAG 41affde
   SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
   BINARY_DIR ""
   #BUILD_IN_SOURCE TRUE
......
@@ -248,11 +248,11 @@ The NULLS syntax specifies where NULL values appear in sorted output. NULLS LAST is the default in ascending order
 LIMIT controls the number of output rows, and OFFSET specifies how many rows to skip before output begins. LIMIT/OFFSET is applied to the result set after ORDER BY. LIMIT 5 OFFSET 2 can be abbreviated as LIMIT 2, 5; both return rows 3 through 7.
-When a PARTITION BY clause is present, LIMIT controls the output of each partition rather than the total result set.
+When a PARTITION BY/GROUP BY clause is present, LIMIT controls the output of each partition rather than the total result set.
 ## SLIMIT
-SLIMIT is used together with a PARTITION BY clause to control the number of partitions in the output. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both return partitions 3 through 7.
+SLIMIT is used together with a PARTITION BY/GROUP BY clause to control the number of partitions in the output. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both return partitions 3 through 7.
 Note that if an ORDER BY clause is present, the output consists of only one partition.
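A minimal illustration of the two clauses, assuming a hypothetical super table `meters` with a `location` tag (neither object comes from this commit):

```sql
-- Assumed schema: CREATE STABLE meters (ts TIMESTAMP, current FLOAT) TAGS (location BINARY(64));

-- LIMIT is applied inside each partition: at most 2 rows come back per location.
SELECT location, ts, current FROM meters PARTITION BY location LIMIT 2;

-- SLIMIT is applied to the partitions themselves: only 3 location groups are returned.
-- After this commit, GROUP BY satisfies the same requirement as PARTITION BY.
SELECT location, COUNT(*) FROM meters GROUP BY location SLIMIT 3;

-- LIMIT 5 OFFSET 2 and LIMIT 2, 5 are equivalent: both return rows 3 through 7.
SELECT ts, current FROM meters ORDER BY ts LIMIT 2, 5;
```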
......
@@ -642,7 +642,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_PAR_INCORRECT_NUM_OF_COL           TAOS_DEF_ERROR_CODE(0, 0x2634)
 #define TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL        TAOS_DEF_ERROR_CODE(0, 0x2635)
 #define TSDB_CODE_PAR_OFFSET_LESS_ZERO               TAOS_DEF_ERROR_CODE(0, 0x2637)
-#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY       TAOS_DEF_ERROR_CODE(0, 0x2638)
+#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY TAOS_DEF_ERROR_CODE(0, 0x2638)
 #define TSDB_CODE_PAR_INVALID_TOPIC_QUERY            TAOS_DEF_ERROR_CODE(0, 0x2639)
 #define TSDB_CODE_PAR_INVALID_DROP_STABLE            TAOS_DEF_ERROR_CODE(0, 0x263A)
 #define TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE        TAOS_DEF_ERROR_CODE(0, 0x263B)
......
@@ -750,7 +750,7 @@ void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
                                    int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
-                                   bool isIntervalQuery, SAggSupporter* pSup);
+                                   bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
 // operator creater functions
 // clang-format off
 SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
......
@@ -195,9 +195,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
  */
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
                                    int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
-                                   bool isIntervalQuery, SAggSupporter* pSup) {
+                                   bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup) {
   SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
+  if (!keepGroup) {
+    *(uint64_t*)pSup->keyBuf = calcGroupId(pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+  }
   SResultRowPosition* p1 =
       (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
@@ -1034,7 +1037,7 @@ void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uin
   int32_t*    rowEntryInfoOffset = pOperator->exprSupp.rowEntryInfoOffset;
   SResultRow* pResultRow = doSetResultOutBufByKey(pAggInfo->aggSup.pResultBuf, pResultRowInfo, (char*)&groupId,
-                                                  sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup);
+                                                  sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup, true);
   /*
    * not assign result buffer yet, add new result buffer
    * all group belong to one result set, and each group result has different group id so set the id to be one
......
@@ -277,6 +277,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
   terrno = TSDB_CODE_SUCCESS;
   int32_t num = 0;
+  uint64_t groupId = 0;
   for (int32_t j = 0; j < pBlock->info.rows; ++j) {
     // Compare with the previous row of this column, and do not set the output buffer again if they are identical.
     if (!pInfo->isInit) {
@@ -473,6 +474,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
   initResultRowInfo(&pInfo->binfo.resultRowInfo);
   setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo);
+  pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
   pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo,
                                          optrDefaultBufFn, NULL);
   code = appendDownstream(pOperator, &downstream, 1);
@@ -917,7 +920,7 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
   SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
   SResultRow* pResultRow =
-      doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup);
+      doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup, false);
   assert(pResultRow != NULL);
   setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
......
@@ -580,7 +580,7 @@ void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SA
   int64_t     tid = 0;
   int64_t     groupId = 0;
   SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId,
-                                            pTaskInfo, false, pSup);
+                                            pTaskInfo, false, pSup, true);
   for (int32_t i = 0; i < numOfExprs; ++i) {
     struct SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, rowEntryInfoOffset);
......
@@ -78,7 +78,7 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo
                                       int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
                                       SExecTaskInfo* pTaskInfo) {
   SResultRow* pResultRow = doSetResultOutBufByKey(pAggSup->pResultBuf, pResultRowInfo, (char*)&win->skey, TSDB_KEYSIZE,
-                                                  masterscan, tableGroupId, pTaskInfo, true, pAggSup);
+                                                  masterscan, tableGroupId, pTaskInfo, true, pAggSup, true);
   if (pResultRow == NULL) {
     *pResult = NULL;
......
@@ -3372,8 +3372,8 @@ static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) {
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_OFFSET_LESS_ZERO);
   }
-  if (NULL != pSelect->pSlimit && NULL == pSelect->pPartitionByList) {
-    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY);
+  if (NULL != pSelect->pSlimit && (NULL == pSelect->pPartitionByList && NULL == pSelect->pGroupByList)) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY);
   }
   return TSDB_CODE_SUCCESS;
......
@@ -103,8 +103,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
       return "Incorrect TIMESTAMP value: %s";
     case TSDB_CODE_PAR_OFFSET_LESS_ZERO:
       return "soffset/offset can not be less than 0";
-    case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY:
-      return "slimit/soffset only available for PARTITION BY query";
+    case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY:
+      return "slimit/soffset only available for PARTITION/GROUP BY query";
     case TSDB_CODE_PAR_INVALID_TOPIC_QUERY:
       return "Invalid topic query";
    case TSDB_CODE_PAR_INVALID_DROP_STABLE:
......
@@ -515,7 +515,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_ONLY_ONE_JSON_TAG, "Only one tag if ther
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_NUM_OF_COL, "Query block has incorrect number of result columns")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL, "Incorrect TIMESTAMP value")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_OFFSET_LESS_ZERO, "soffset/offset can not be less than 0")
-TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY, "slimit/soffset only available for PARTITION BY query")
+TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY, "slimit/soffset only available for PARTITION/GROUP BY query")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TOPIC_QUERY, "Invalid topic query")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DROP_STABLE, "Cannot drop super table in batch")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE, "Start(end) time of query range required or time range too large")
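Taken together, the parser and error-message changes mean SLIMIT is now accepted whenever either PARTITION BY or GROUP BY is present, and rejected otherwise. A sketch of the expected behavior, reusing the assumed `meters` table from above:

```sql
-- Still rejected: no PARTITION BY or GROUP BY, so there are no partitions to slice.
-- Expected error: "slimit/soffset only available for PARTITION/GROUP BY query"
SELECT COUNT(*) FROM meters SLIMIT 1;

-- Newly accepted: GROUP BY now passes the check, as PARTITION BY always did.
SELECT location, COUNT(*) FROM meters GROUP BY location SLIMIT 1;
```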
......
@@ -108,49 +108,49 @@ class TDTestCase:
         tdLog.info("%s" % cmd)
         os.system("%s" % cmd)
         tdSql.execute("reset query cache")
-        tdSql.query("select count(*) from (select distinct(tbname) from db.stb2)")
+        tdSql.query("select count(*) from (select distinct(tbname) from stmt_db.stb2)")
         tdSql.checkData(0, 0, 8)
-        tdSql.query("select count(*) from db.stb2")
+        tdSql.query("select count(*) from stmt_db.stb2")
         tdSql.checkData(0, 0, 160)
-        tdSql.query("select * from information_schema.ins_databases")
-        tdSql.checkData(2, 14, "us")
+        tdSql.query("select * from information_schema.ins_databases where name='stmt_db'")
+        tdSql.checkData(0, 14, "us")
         tdSql.execute("reset query cache")
-        tdSql.query("select count(*) from (select distinct(tbname) from db.`stb2-2`)")
+        tdSql.query("select count(*) from (select distinct(tbname) from stmt_db.`stb2-2`)")
         tdSql.checkData(0, 0, 8)
-        tdSql.query("select count(*) from db.`stb2-2`")
+        tdSql.query("select count(*) from stmt_db.`stb2-2`")
         tdSql.checkData(0, 0, 160)
         cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json" %binPath
         tdLog.info("%s" % cmd)
         os.system("%s" % cmd)
         tdSql.execute("reset query cache")
-        tdSql.query("select count(*) from (select distinct(tbname) from db.stb3)")
+        tdSql.query("select count(*) from (select distinct(tbname) from rest_db.stb3)")
         tdSql.checkData(0, 0, 8)
-        tdSql.query("select count(*) from db.stb3")
+        tdSql.query("select count(*) from rest_db.stb3")
         tdSql.checkData(0, 0, 160)
-        tdSql.query("select * from information_schema.ins_databases")
-        tdSql.checkData(2, 14, "ns")
+        tdSql.query("select * from information_schema.ins_databases where name='rest_db'")
+        tdSql.checkData(0, 14, "ns")
         tdSql.execute("reset query cache")
-        tdSql.query("select count(*) from (select distinct(tbname) from db.`stb3-2`)")
+        tdSql.query("select count(*) from (select distinct(tbname) from rest_db.`stb3-2`)")
         tdSql.checkData(0, 0, 8)
-        tdSql.query("select count(*) from db.`stb3-2`")
+        tdSql.query("select count(*) from rest_db.`stb3-2`")
         tdSql.checkData(0, 0, 160)
         cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json" %binPath
         tdLog.info("%s" % cmd)
         os.system("%s" % cmd)
         tdSql.execute("reset query cache")
-        tdSql.query("select count(*) from (select distinct(tbname) from db.stb4)")
+        tdSql.query("select count(*) from (select distinct(tbname) from sml_db.stb4)")
         tdSql.checkData(0, 0, 8)
-        tdSql.query("select count(*) from db.stb4")
+        tdSql.query("select count(*) from sml_db.stb4")
         tdSql.checkData(0, 0, 160)
         tdSql.execute("reset query cache")
-        tdSql.query("select count(*) from (select distinct(tbname) from db.`stb4-2`)")
+        tdSql.query("select count(*) from (select distinct(tbname) from sml_db.`stb4-2`)")
         tdSql.checkData(0, 0, 8)
-        tdSql.query("select count(*) from db.`stb4-2`")
+        tdSql.query("select count(*) from sml_db.`stb4-2`")
         tdSql.checkData(0, 0, 160)
         tAdapter.stop()
......
@@ -15,7 +15,7 @@
     "num_of_records_per_req": 10,
     "databases": [{
         "dbinfo": {
-            "name": "db",
+            "name": "rest_db",
             "drop": "yes",
             "replica": 1,
             "precision": "ns",
......
@@ -15,7 +15,7 @@
     "num_of_records_per_req": 10,
     "databases": [{
         "dbinfo": {
-            "name": "db",
+            "name": "sml_db",
             "drop": "yes",
             "replica": 1,
             "precision": "ms",
......
@@ -15,7 +15,7 @@
     "num_of_records_per_req": 10,
     "databases": [{
         "dbinfo": {
-            "name": "db",
+            "name": "stmt_db",
             "drop": "yes",
             "replica": 1,
             "precision": "us",
......
@@ -178,6 +178,7 @@
 ,,y,script,./test.sh -f tsim/parser/top_groupby.sim
 ,,y,script,./test.sh -f tsim/parser/topbot.sim
 ,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
+,,y,script,./test.sh -f tsim/parser/slimit_limit.sim
 ,,y,script,./test.sh -f tsim/query/tagLikeFilter.sim
 ,,y,script,./test.sh -f tsim/query/charScalarFunction.sim
 ,,y,script,./test.sh -f tsim/query/explain.sim
......
@@ -70,10 +70,10 @@ ulimit -c unlimited
 md5sum /usr/lib/libtaos.so.1
 md5sum /home/TDinternal/debug/build/lib/libtaos.so
-#define taospy 2.7.3
+#define taospy 2.7.6
 pip3 list|grep taospy
 pip3 uninstall taospy -y
-pip3 install taospy==2.7.3
+pip3 install taospy==2.7.6
 $TIMEOUT_CMD $cmd
 RET=$?
......
@@ -415,12 +415,12 @@ if $data03 != 0 then
   return -1
 endi
-sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 1;
+sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 slimit 1;
 if $rows != 1 then
   return -1
 endi
-sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 20 offset 9990;
+sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 slimit 20 soffset 9990;
 if $rows != 10 then
   return -1
 endi
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists db1;
sql create database db1 vgroups 1;
sql use db1;
sql create stable sta (ts timestamp, f1 int, f2 binary(200)) tags(t1 int, t2 int, t3 int);
sql create table tba1 using sta tags(1, 1, 1);
sql create table tba2 using sta tags(2, 2, 2);
sql create table tba3 using sta tags(3, 3, 3);
sql create table tba4 using sta tags(4, 4, 4);
sql create table tba5 using sta tags(5, 5, 5);
sql create table tba6 using sta tags(6, 6, 6);
sql create table tba7 using sta tags(7, 7, 7);
sql create table tba8 using sta tags(8, 8, 8);
sql create index index1 on sta (t2);
sql insert into tba1 values ('2022-04-26 15:15:01', 1, "a");
sql insert into tba1 values ('2022-04-26 15:15:02', 11, "a");
sql insert into tba2 values ('2022-04-26 15:15:01', 2, "a");
sql insert into tba2 values ('2022-04-26 15:15:02', 22, "a");
sql insert into tba3 values ('2022-04-26 15:15:01', 3, "a");
sql insert into tba4 values ('2022-04-26 15:15:01', 4, "a");
sql insert into tba5 values ('2022-04-26 15:15:01', 5, "a");
sql insert into tba6 values ('2022-04-26 15:15:01', 6, "a");
sql insert into tba7 values ('2022-04-26 15:15:01', 7, "a");
sql insert into tba8 values ('2022-04-26 15:15:01', 8, "a");
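# Without ORDER BY, LIMIT caps rows within each group, so every group still appears;
# SLIMIT caps the number of groups themselves.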
sql select t1,count(*) from sta group by t1 limit 1;
if $rows != 8 then
return -1
endi
sql select t1,count(*) from sta group by t1 slimit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 limit 1;
if $rows != 10 then
return -1
endi
sql select f1,count(*) from sta group by f1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 limit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 limit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 slimit 1;
if $rows != 1 then
return -1
endi
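# With ORDER BY, the groups merge into a single ordered partition: LIMIT caps the
# total rows, while SLIMIT keeps that single partition, and all its rows, intact.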
sql select t1,count(*) from sta group by t1 order by t1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,count(*) from sta group by t1 order by t1 slimit 1;
if $rows != 8 then
return -1
endi
sql select f1,count(*) from sta group by f1 order by f1 limit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 order by f1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 order by t1,f1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 order by t1,f1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 order by f1,t1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 order by f1,t1 slimit 1;
if $rows != 10 then
return -1
endi
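# SLIMIT and LIMIT combine: keep one group, then one row within it.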
sql select t1,count(*) from sta group by t1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -11,45 +11,42 @@
 # -*- coding: utf-8 -*-

-from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
-import taos
-import sys
-import time
 import os
-import socket
-import subprocess
 import random
+import socket
 import string
-import random
+import subprocess
+import sys
+import time
+from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE

-from util.log import *
-from util.sql import *
+import taos
+
 from util.cases import *
 from util.common import *
+from util.dnodes import *
+from util.dnodes import TDDnode, TDDnodes
+from util.log import *
+from util.sql import *
 from util.sqlset import *
-from util.dnodes import *
-from util.dnodes import TDDnodes
-from util.dnodes import TDDnode

 #
 # -------------- util --------------------------
 #
 def pathSize(path):
     total_size = 0
     for dirpath, dirnames, filenames in os.walk(path):
         for i in filenames:
-            #use join to concatenate all the components of path
+            # use join to concatenate all the components of path
             f = os.path.join(dirpath, i)
-            #use getsize to generate size in bytes and add it to the total size
+            # use getsize to generate size in bytes and add it to the total size
             total_size += os.path.getsize(f)
-    #print(dirpath)
-    print(" %s %.02f MB"%(path, total_size/1024/1024))
+    # print(dirpath)
+    print(" %s %.02f MB" % (path, total_size/1024/1024))
     return total_size
 '''
     total = 0
     with os.scandir(path) as it:
@@ -67,24 +64,27 @@ def pathSize(path):
 # --------------- cluster ------------------------
 #
 class MyDnodes(TDDnodes):
-    def __init__(self ,dnodes_lists):
-        super(MyDnodes,self).__init__()
+    def __init__(self, dnodes_lists):
+        super(MyDnodes, self).__init__()
         self.dnodes = dnodes_lists  # dnode must be TDDnode instance
         self.simDeployed = False

 class TagCluster:
     noConn = True

     def init(self, conn, logSql, replicaVar=1):
         tdLog.debug(f"start to excute {__file__}")
         self.TDDnodes = None
         self.depoly_cluster(5)
         self.master_dnode = self.TDDnodes.dnodes[0]
-        self.host=self.master_dnode.cfgDict["fqdn"]
-        conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
+        self.host = self.master_dnode.cfgDict["fqdn"]
+        conn1 = taos.connect(
+            self.master_dnode.cfgDict["fqdn"], config=self.master_dnode.cfgDir)
         tdSql.init(conn1.cursor())

     def getBuildPath(self):
         selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -101,8 +101,7 @@ class TagCluster:
                 break
         return buildPath

-    def depoly_cluster(self ,dnodes_nums):
+    def depoly_cluster(self, dnodes_nums):
         testCluster = False
         valgrind = 0
@@ -126,7 +125,7 @@ class TagCluster:
         self.TDDnodes.setAsan(tdDnodes.getAsan())
         self.TDDnodes.stopAll()
         for dnode in self.TDDnodes.dnodes:
-            self.TDDnodes.deploy(dnode.index,{})
+            self.TDDnodes.deploy(dnode.index, {})
         for dnode in self.TDDnodes.dnodes:
             self.TDDnodes.starttaosd(dnode.index)
@@ -136,7 +135,8 @@ class TagCluster:
         sql = ""
         for dnode in self.TDDnodes.dnodes[1:]:
             # print(dnode.cfgDict)
-            dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
+            dnode_id = dnode.cfgDict["fqdn"] + \
+                ":" + dnode.cfgDict["serverPort"]
             if dnode_first_host == "":
                 dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
                 dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
@@ -145,18 +145,17 @@ class TagCluster:
         cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s "
         cmd += f'"{sql}"'
         print(cmd)
         os.system(cmd)

         time.sleep(2)
         tdLog.info(" create cluster done! ")

     def getConnection(self, dnode):
         host = dnode.cfgDict["fqdn"]
         port = dnode.cfgDict["serverPort"]
         config_dir = dnode.cfgDir
         return taos.connect(host=host, port=int(port), config=config_dir)

     def run(self):
         tdLog.info(" create cluster ok.")
@@ -168,22 +167,22 @@ class TagCluster:
 class PerfDB:
     def __init__(self):
         self.sqls = []
-        self.spends= []
+        self.spends = []

     # execute
     def execute(self, sql):
         print(f" perfdb execute {sql}")
         stime = time.time()
         ret = tdSql.execute(sql, 1)
         spend = time.time() - stime

         self.sqls.append(sql)
         self.spends.append(spend)
         return ret

     # query
     def query(self, sql):
         print(f" perfdb query {sql}")
         start = time.time()
         ret = tdSql.query(sql, None, 1)
         spend = time.time() - start
@@ -203,9 +202,9 @@ class TDTestCase:
         self.tagCluster = TagCluster()
         self.tagCluster.init(conn, logSql, replicaVar)
         self.lenBinary = 64
         self.lenNchar = 32

         # column
         self.column_dict = {
             'ts': 'timestamp',
             'col1': 'tinyint',
@@ -252,14 +251,14 @@ class TDTestCase:
     # query
     def query(self, sql):
         return self.dbs[self.cur].query(sql)

-    def set_stb_sql(self,stbname,column_dict,tag_dict):
+    def set_stb_sql(self, stbname, column_dict, tag_dict):
         column_sql = ''
         tag_sql = ''
-        for k,v in column_dict.items():
+        for k, v in column_dict.items():
             column_sql += f"{k} {v}, "
-        for k,v in tag_dict.items():
+        for k, v in tag_dict.items():
             tag_sql += f"{k} {v}, "
         create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})'
         return create_stb_sql
@@ -268,37 +267,41 @@ class TDTestCase:
     def create_database(self, dbname, vgroups, replica):
         sql = f'create database {dbname} vgroups {vgroups} replica {replica}'
         tdSql.execute(sql)
-        #tdSql.execute(sql)
+        # tdSql.execute(sql)
         tdSql.execute(f'use {dbname}')

     # create stable and child tables
     def create_table(self, stbname, tbname, count):
         # create stable
-        create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict)
+        create_table_sql = self.set_stb_sql(
+            stbname, self.column_dict, self.tag_dict)
         tdSql.execute(create_table_sql)

         # create child table
         tdLog.info(f" start create {count} child tables.")
-        for i in range(count):
-            ti = i % 128
-            binTxt = self.random_string(self.lenBinary)
-            tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"{binTxt}","nch{i}",now'
-            sql = f'create table {tbname}{i} using {stbname} tags({tags})'
-            tdSql.execute(sql)
-            if i > 0 and i % 1000 == 0:
-                tdLog.info(f" child table count = {i}")
+        batchSql = ""
+        batchSize = 5000
+        for i in range(int(count/batchSize)):
+            batchSql = "create table"
+            for j in range(batchSize):
+                ti = (i * batchSize + j) % 128
+                binTxt = self.random_string(self.lenBinary)
+                idx = i * batchSize + j
+                tags = f'{ti},{ti},{idx},{idx},{ti},{ti},{idx},{idx},{idx}.000{idx},{idx}.000{idx},true,"{binTxt}","nch{idx}",now'
+                sql = f'{tbname}{idx} using {stbname} tags({tags})'
+                batchSql = batchSql + " " + sql
+            tdSql.execute(batchSql)
+            tdLog.info(f" child table count = {i * batchSize}")
+        tdLog.info(f" end create {count} child tables.")

     # create stable and child tables
     def create_tagidx(self, stbname):
         cnt = -1
         for key in self.tag_dict.keys():
             # first tag have default index, so skip
             if cnt == -1:
                 cnt = 0
-                continue;
+                continue
             sql = f'create index idx_{key} on {stbname} ({key})'
             tdLog.info(f" sql={sql}")
             tdSql.execute(sql)
@@ -309,11 +312,11 @@ class TDTestCase:
     def insert_data(self, tbname):
         # d1 insert 3 rows
         for i in range(3):
             sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
             tdSql.execute(sql)
         # d20 insert 4
         for i in range(4):
             sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
             tdSql.execute(sql)

     # check show indexs
@@ -376,17 +379,17 @@ class TDTestCase:
         self.query(sql)
         tdSql.checkRows(4)

     # drop child table
     def drop_tables(self, tbname, count):
         # table d1 and d20 have verify data , so can not drop
         start = random.randint(21, count/2)
         end = random.randint(count/2 + 1, count - 1)
         for i in range(start, end):
             sql = f'drop table {tbname}{i}'
             tdSql.execute(sql)
         cnt = end - start + 1
         tdLog.info(f' drop table from {start} to {end} count={cnt}')

     # drop tag index
     def drop_tagidx(self, dbname, stbname):
@@ -396,11 +399,11 @@ class TDTestCase:
             # first tag have default index, so skip
             if cnt == -1:
                 cnt = 0
-                continue;
+                continue
             sql = f'drop index idx_{key}'
             tdSql.execute(sql)
             cnt += 1

         # check idx result is 0
         sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"'
         tdSql.query(sql)
@@ -408,17 +411,19 @@ class TDTestCase:
         tdLog.info(f' drop {cnt} tag indexs ok.')

     # show performance
-    def show_performance(self, count) :
+    def show_performance(self, count):
         db = self.dbs[0]
         db1 = self.dbs[1]
         cnt = len(db.sqls)
         cnt1 = len(db1.sqls)
         if cnt != len(db1.sqls):
-            tdLog.info(f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
+            tdLog.info(
+                f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
             return False
         tdLog.info(f" database sql cnt ={cnt}")
-        print(f" ----------------- performance (child tables = {count})--------------------")
+        print(
+            f" ----------------- performance (child tables = {count})--------------------")
         print(" No  time(index)  time(no-index)  diff(col3-col2)  rate(col2/col3)  sql")
         for i in range(cnt):
             key = db.sqls[i]
@@ -427,12 +432,13 @@ class TDTestCase:
             value1 = db1.spends[i]
             diff = value1 - value
             rate = value/value1*100
-            print(" %d %.3fs %.3fs %.3fs %d%% %s"%(i+1, value, value1, diff, rate, key))
+            print(" %d %.3fs %.3fs %.3fs %d%% %s" % (
+                i+1, value, value1, diff, rate, key))
         print(" --------------------- end ------------------------")
         return True

     def show_diskspace(self):
-        #calc
+        # calc
         selfPath = os.path.dirname(os.path.realpath(__file__))
         projPath = ""
         if ("community" in selfPath):
@@ -451,43 +457,41 @@ class TDTestCase:
         idx_size = vnode2_size + vnode3_size
         noidx_size = vnode4_size + vnode5_size

-        print(" index = %.02f M"%(idx_size/1024/1024))
-        print(" no-index = %.02f M"%(noidx_size/1024/1024))
-        print(" index/no-index = %.2f multiple"%(idx_size/noidx_size))
+        print(" index = %.02f M" % (idx_size/1024/1024))
+        print(" no-index = %.02f M" % (noidx_size/1024/1024))
+        print(" index/no-index = %.2f multiple" % (idx_size/noidx_size))
         print(" -------------------- end ------------------------")

     # main
     def testdb(self, dbname, stable, tbname, count, createidx):
         # cur
         if createidx:
             self.cur = 0
-        else :
+        else:
             self.cur = 1

         # do
         self.create_database(dbname, 2, 1)
         self.create_table(stable, tbname, count)
-        if(createidx):
+        if (createidx):
             self.create_tagidx(stable)
         self.insert_data(tbname)
-        if(createidx):
-            self.show_tagidx(dbname,stable)
+        if (createidx):
+            self.show_tagidx(dbname, stable)
         self.query_tagidx(stable)
-        #self.drop_tables(tbname, count)
-        #if(createidx):
-        #    self.drop_tagidx(dbname, stable)
+        # self.drop_tables(tbname, count)
+        # if(createidx):
+        #    self.drop_tagidx(dbname, stable)
         # query after delete , expect no crash
-        #self.query_tagidx(stable)
+        # self.query_tagidx(stable)
         tdSql.execute(f'flush database {dbname}')

     # run
     def run(self):
         self.tagCluster.run()

         # var
         dbname = "tagindex"
         dbname1 = dbname + "1"
@@ -511,10 +515,10 @@ class TDTestCase:
         self.show_diskspace()

     def stop(self):
         self.tagCluster.stop()
         tdLog.success("%s successfully executed" % __file__)

 tdCases.addWindows(__file__, TDTestCase())
 tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
@@ -171,6 +171,7 @@ class TDTestCase:
             if any(parm in condition.lower().strip() for parm in condition_exception):
                 print(f"case in {line}: ", end='')
+                print(f"condition : {condition}: ", end='')
                 return tdSql.error(self.sample_query_form(
                     sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
                     table_expr=table_expr, condition=condition
@@ -391,16 +392,6 @@ class TDTestCase:
         self.checksample(**case25)
         case26 = {"k": 1000}
         self.checksample(**case26)
-        case27 = {
-            "table_expr": f"{DBNAME}.stb1",
-            "condition": "group by tbname slimit 1 "
-        }
-        self.checksample(**case27)  # with slimit
-        case28 = {
-            "table_expr": f"{DBNAME}.stb1",
-            "condition": "group by tbname slimit 1 soffset 1"
-        }
-        self.checksample(**case28)  # with soffset

         pass
......