Commit c3b0245f authored by Ganlin Zhao

Merge branch 'main' into fix/TD-23087

# Contributor Covenant Code of Conduct
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg)](code_of_conduct.md)
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at support@taosdata.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq
\ No newline at end of file
......@@ -313,7 +313,8 @@ def pre_test_build_win() {
bat '''
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip install .
python -m pip uninstall taospy -y
python -m pip install taospy==2.7.6
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
......@@ -331,8 +332,6 @@ def run_win_test() {
bat '''
echo "windows test ..."
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
python -m pip install .
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
......
......@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 41d4f95
GIT_TAG 41affde
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -248,11 +248,11 @@ The NULLS syntax specifies where NULL values are placed in the sorted output. NULLS LAST is the ascending…
LIMIT controls the number of output rows, and OFFSET specifies how many rows to skip before output begins. LIMIT/OFFSET is applied to the result set after ORDER BY. LIMIT 5 OFFSET 2 can be abbreviated as LIMIT 2, 5; both output rows 3 through 7.
When there is a PARTITION BY clause, LIMIT controls the output within each partition rather than the total result set.
When there is a PARTITION BY/GROUP BY clause, LIMIT controls the output within each partition rather than the total result set.
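A minimal sketch of the difference (the super table `meters` and its columns `ts`, `current`, and tag `location` are hypothetical, not part of this commit):

```sql
-- Plain LIMIT/OFFSET on the whole result set:
-- LIMIT 5 OFFSET 2 and its shorthand LIMIT 2, 5 both return rows 3 through 7
SELECT * FROM meters ORDER BY ts LIMIT 5 OFFSET 2;

-- With PARTITION BY (and, after this change, GROUP BY), LIMIT applies
-- within each partition: up to 5 rows per location, not 5 rows in total
SELECT location, ts, current FROM meters PARTITION BY location LIMIT 5;
```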
## SLIMIT
SLIMIT is used together with PARTITION BY to control the number of partitions in the output. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both output the 3rd through 7th partitions.
SLIMIT is used together with PARTITION BY/GROUP BY to control the number of partitions in the output. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both output the 3rd through 7th partitions.
Note that if there is an ORDER BY clause, only one partition is output.
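A companion sketch for SLIMIT, continuing the same hypothetical `meters` example; SLIMIT bounds how many partitions are emitted, while LIMIT bounds the rows inside each partition:

```sql
-- At most 2 partitions (locations), and at most 3 rows within each of them
SELECT location, ts, current FROM meters PARTITION BY location SLIMIT 2 LIMIT 3;

-- SLIMIT 5 SOFFSET 2, equivalently SLIMIT 2, 5: partitions 3 through 7
SELECT location, COUNT(*) FROM meters PARTITION BY location SLIMIT 2, 5;
```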
......
......@@ -642,7 +642,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INCORRECT_NUM_OF_COL TAOS_DEF_ERROR_CODE(0, 0x2634)
#define TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL TAOS_DEF_ERROR_CODE(0, 0x2635)
#define TSDB_CODE_PAR_OFFSET_LESS_ZERO TAOS_DEF_ERROR_CODE(0, 0x2637)
#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY TAOS_DEF_ERROR_CODE(0, 0x2638)
#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY TAOS_DEF_ERROR_CODE(0, 0x2638)
#define TSDB_CODE_PAR_INVALID_TOPIC_QUERY TAOS_DEF_ERROR_CODE(0, 0x2639)
#define TSDB_CODE_PAR_INVALID_DROP_STABLE TAOS_DEF_ERROR_CODE(0, 0x263A)
#define TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE TAOS_DEF_ERROR_CODE(0, 0x263B)
......
......@@ -506,6 +506,11 @@ static int32_t getTableDelData(STbData *pMem, STbData *pIMem, SDelFReader *pDelR
SArray *aDelData) {
int32_t code = 0;
if (pDelIdx) {
code = getTableDelDataFromDelIdx(pDelReader, pDelIdx, aDelData);
if (code) goto _err;
}
if (pMem) {
code = getTableDelDataFromTbData(pMem, aDelData);
if (code) goto _err;
......@@ -516,11 +521,6 @@ static int32_t getTableDelData(STbData *pMem, STbData *pIMem, SDelFReader *pDelR
if (code) goto _err;
}
if (pDelIdx) {
code = getTableDelDataFromDelIdx(pDelReader, pDelIdx, aDelData);
if (code) goto _err;
}
_err:
return code;
}
......
......@@ -198,7 +198,7 @@ static int32_t tsdbSnapCmprData(STsdbSnapReader* pReader, uint8_t** ppData) {
ASSERT(pReader->bData.nRow);
int32_t aBufN[5] = {0};
code = tCmprBlockData(&pReader->bData, TWO_STAGE_COMP, NULL, NULL, pReader->aBuf, aBufN);
code = tCmprBlockData(&pReader->bData, NO_COMPRESSION, NULL, NULL, pReader->aBuf, aBufN);
if (code) goto _exit;
int32_t size = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
......@@ -276,7 +276,7 @@ static int32_t tsdbSnapReadTimeSeriesData(STsdbSnapReader* pReader, uint8_t** pp
code = tsdbSnapReadNextRow(pReader, &pRowInfo);
TSDB_CHECK_CODE(code, lino, _exit);
if (pReader->bData.nRow >= 4096) break;
if (pReader->bData.nRow >= 81920) break;
} while (pRowInfo);
ASSERT(pReader->bData.nRow > 0);
......
......@@ -750,7 +750,7 @@ void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
bool isIntervalQuery, SAggSupporter* pSup);
bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
// operator creator functions
// clang-format off
SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
......
......@@ -195,9 +195,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
*/
SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
bool isIntervalQuery, SAggSupporter* pSup) {
bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
if (!keepGroup) {
*(uint64_t*)pSup->keyBuf = calcGroupId(pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
}
SResultRowPosition* p1 =
(SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
......@@ -1034,7 +1037,7 @@ void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uin
int32_t* rowEntryInfoOffset = pOperator->exprSupp.rowEntryInfoOffset;
SResultRow* pResultRow = doSetResultOutBufByKey(pAggInfo->aggSup.pResultBuf, pResultRowInfo, (char*)&groupId,
sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup);
sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup, true);
/*
* not assign result buffer yet, add new result buffer
* all group belong to one result set, and each group result has different group id so set the id to be one
......
......@@ -277,6 +277,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
terrno = TSDB_CODE_SUCCESS;
int32_t num = 0;
uint64_t groupId = 0;
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
if (!pInfo->isInit) {
......@@ -473,6 +474,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
initResultRowInfo(&pInfo->binfo.resultRowInfo);
setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo);
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo,
optrDefaultBufFn, NULL);
code = appendDownstream(pOperator, &downstream, 1);
......@@ -917,7 +920,7 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
SResultRow* pResultRow =
doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup);
doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup, false);
assert(pResultRow != NULL);
setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
......
......@@ -580,7 +580,7 @@ void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SA
int64_t tid = 0;
int64_t groupId = 0;
SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId,
pTaskInfo, false, pSup);
pTaskInfo, false, pSup, true);
for (int32_t i = 0; i < numOfExprs; ++i) {
struct SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, rowEntryInfoOffset);
......
......@@ -78,7 +78,7 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo
int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
SExecTaskInfo* pTaskInfo) {
SResultRow* pResultRow = doSetResultOutBufByKey(pAggSup->pResultBuf, pResultRowInfo, (char*)&win->skey, TSDB_KEYSIZE,
masterscan, tableGroupId, pTaskInfo, true, pAggSup);
masterscan, tableGroupId, pTaskInfo, true, pAggSup, true);
if (pResultRow == NULL) {
*pResult = NULL;
......
......@@ -3372,8 +3372,8 @@ static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_OFFSET_LESS_ZERO);
}
if (NULL != pSelect->pSlimit && NULL == pSelect->pPartitionByList) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY);
if (NULL != pSelect->pSlimit && (NULL == pSelect->pPartitionByList && NULL == pSelect->pGroupByList)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY);
}
return TSDB_CODE_SUCCESS;
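A hedged illustration of what the relaxed check accepts, reusing the schema from the slimit_limit.sim test added in this commit (super table sta with tag t1):

```sql
-- Previously rejected with "slimit/soffset only available for PARTITION BY query";
-- now valid: SLIMIT limits the number of output groups
SELECT t1, COUNT(*) FROM sta GROUP BY t1 SLIMIT 1;
```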
......@@ -5573,7 +5573,8 @@ static int32_t checkCreateTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt->pQuery)) {
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
if (!pSelect->isDistinct && QUERY_NODE_REAL_TABLE == nodeType(pSelect->pFromTable) &&
if (!pSelect->isDistinct &&
(NULL != pSelect->pFromTable && QUERY_NODE_REAL_TABLE == nodeType(pSelect->pFromTable)) &&
NULL == pSelect->pGroupByList && NULL == pSelect->pLimit && NULL == pSelect->pSlimit &&
NULL == pSelect->pOrderByList && NULL == pSelect->pPartitionByList) {
return TSDB_CODE_SUCCESS;
......
......@@ -103,8 +103,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "Incorrect TIMESTAMP value: %s";
case TSDB_CODE_PAR_OFFSET_LESS_ZERO:
return "soffset/offset can not be less than 0";
case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY:
return "slimit/soffset only available for PARTITION BY query";
case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY:
return "slimit/soffset only available for PARTITION/GROUP BY query";
case TSDB_CODE_PAR_INVALID_TOPIC_QUERY:
return "Invalid topic query";
case TSDB_CODE_PAR_INVALID_DROP_STABLE:
......
......@@ -515,7 +515,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_ONLY_ONE_JSON_TAG, "Only one tag if ther
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_NUM_OF_COL, "Query block has incorrect number of result columns")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL, "Incorrect TIMESTAMP value")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_OFFSET_LESS_ZERO, "soffset/offset can not be less than 0")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY, "slimit/soffset only available for PARTITION BY query")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY, "slimit/soffset only available for PARTITION/GROUP BY query")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TOPIC_QUERY, "Invalid topic query")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DROP_STABLE, "Cannot drop super table in batch")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE, "Start(end) time of query range required or time range too large")
......
......@@ -108,49 +108,49 @@ class TDTestCase:
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(*) from (select distinct(tbname) from db.stb2)")
tdSql.query("select count(*) from (select distinct(tbname) from stmt_db.stb2)")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb2")
tdSql.query("select count(*) from stmt_db.stb2")
tdSql.checkData(0, 0, 160)
tdSql.query("select * from information_schema.ins_databases")
tdSql.checkData(2, 14, "us")
tdSql.query("select * from information_schema.ins_databases where name='stmt_db'")
tdSql.checkData(0, 14, "us")
tdSql.execute("reset query cache")
tdSql.query("select count(*) from (select distinct(tbname) from db.`stb2-2`)")
tdSql.query("select count(*) from (select distinct(tbname) from stmt_db.`stb2-2`)")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb2-2`")
tdSql.query("select count(*) from stmt_db.`stb2-2`")
tdSql.checkData(0, 0, 160)
cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json" %binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(*) from (select distinct(tbname) from db.stb3)")
tdSql.query("select count(*) from (select distinct(tbname) from rest_db.stb3)")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb3")
tdSql.query("select count(*) from rest_db.stb3")
tdSql.checkData(0, 0, 160)
tdSql.query("select * from information_schema.ins_databases")
tdSql.checkData(2, 14, "ns")
tdSql.query("select * from information_schema.ins_databases where name='rest_db'")
tdSql.checkData(0, 14, "ns")
tdSql.execute("reset query cache")
tdSql.query("select count(*) from (select distinct(tbname) from db.`stb3-2`)")
tdSql.query("select count(*) from (select distinct(tbname) from rest_db.`stb3-2`)")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb3-2`")
tdSql.query("select count(*) from rest_db.`stb3-2`")
tdSql.checkData(0, 0, 160)
cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json" %binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("reset query cache")
tdSql.query("select count(*) from (select distinct(tbname) from db.stb4)")
tdSql.query("select count(*) from (select distinct(tbname) from sml_db.stb4)")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.stb4")
tdSql.query("select count(*) from sml_db.stb4")
tdSql.checkData(0, 0, 160)
tdSql.execute("reset query cache")
tdSql.query("select count(*) from (select distinct(tbname) from db.`stb4-2`)")
tdSql.query("select count(*) from (select distinct(tbname) from sml_db.`stb4-2`)")
tdSql.checkData(0, 0, 8)
tdSql.query("select count(*) from db.`stb4-2`")
tdSql.query("select count(*) from sml_db.`stb4-2`")
tdSql.checkData(0, 0, 160)
tAdapter.stop()
......
......@@ -15,7 +15,7 @@
"num_of_records_per_req": 10,
"databases": [{
"dbinfo": {
"name": "db",
"name": "rest_db",
"drop": "yes",
"replica": 1,
"precision": "ns",
......
......@@ -15,7 +15,7 @@
"num_of_records_per_req": 10,
"databases": [{
"dbinfo": {
"name": "db",
"name": "sml_db",
"drop": "yes",
"replica": 1,
"precision": "ms",
......
......@@ -15,7 +15,7 @@
"num_of_records_per_req": 10,
"databases": [{
"dbinfo": {
"name": "db",
"name": "stmt_db",
"drop": "yes",
"replica": 1,
"precision": "us",
......
......@@ -168,6 +168,7 @@
,,y,script,./test.sh -f tsim/parser/union.sim
,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
,,y,script,./test.sh -f tsim/parser/where.sim
,,y,script,./test.sh -f tsim/parser/slimit_limit.sim
,,y,script,./test.sh -f tsim/query/tagLikeFilter.sim
,,y,script,./test.sh -f tsim/query/charScalarFunction.sim
,,y,script,./test.sh -f tsim/query/explain.sim
......
......@@ -69,12 +69,19 @@ ulimit -c unlimited
md5sum /usr/lib/libtaos.so.1
md5sum /home/TDinternal/debug/build/lib/libtaos.so
#define taospy 2.7.6
pip3 list|grep taospy
pip3 uninstall taospy -y
pip3 install taospy==2.7.6
$TIMEOUT_CMD $cmd
RET=$?
echo "cmd exit code: $RET"
md5sum /usr/lib/libtaos.so.1
md5sum /home/TDinternal/debug/build/lib/libtaos.so
if [ $RET -ne 0 ]; then
pwd
fi
......
......@@ -130,8 +130,6 @@ docker run \
-v ${SOURCEDIR}:/usr/local/src/ \
-v "$TMP_DIR/thread_volume/$thread_no/sim:${SIM_DIR}" \
-v ${TMP_DIR}/thread_volume/$thread_no/coredump:$coredump_dir \
-v $WORKDIR/taos-connector-python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \
-v $WORKDIR/taos-connector-python/taosrest:/usr/local/lib/python3.8/site-packages/taosrest:ro \
--rm --ulimit core=-1 taos_test:v1.0 $CONTAINER_TESTDIR/tests/parallel_test/run_case.sh -d "$exec_dir" -c "$cmd" $extra_param
ret=$?
exit $ret
......
......@@ -415,12 +415,12 @@ if $data03 != 0 then
return -1
endi
sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 1;
sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 slimit 1;
if $rows != 1 then
return -1
endi
sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 20 offset 9990;
sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 slimit 20 soffset 9990;
if $rows != 10 then
return -1
endi
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists db1;
sql create database db1 vgroups 1;
sql use db1;
sql create stable sta (ts timestamp, f1 int, f2 binary(200)) tags(t1 int, t2 int, t3 int);
sql create table tba1 using sta tags(1, 1, 1);
sql create table tba2 using sta tags(2, 2, 2);
sql create table tba3 using sta tags(3, 3, 3);
sql create table tba4 using sta tags(4, 4, 4);
sql create table tba5 using sta tags(5, 5, 5);
sql create table tba6 using sta tags(6, 6, 6);
sql create table tba7 using sta tags(7, 7, 7);
sql create table tba8 using sta tags(8, 8, 8);
sql create index index1 on sta (t2);
sql insert into tba1 values ('2022-04-26 15:15:01', 1, "a");
sql insert into tba1 values ('2022-04-26 15:15:02', 11, "a");
sql insert into tba2 values ('2022-04-26 15:15:01', 2, "a");
sql insert into tba2 values ('2022-04-26 15:15:02', 22, "a");
sql insert into tba3 values ('2022-04-26 15:15:01', 3, "a");
sql insert into tba4 values ('2022-04-26 15:15:01', 4, "a");
sql insert into tba5 values ('2022-04-26 15:15:01', 5, "a");
sql insert into tba6 values ('2022-04-26 15:15:01', 6, "a");
sql insert into tba7 values ('2022-04-26 15:15:01', 7, "a");
sql insert into tba8 values ('2022-04-26 15:15:01', 8, "a");
sql select t1,count(*) from sta group by t1 limit 1;
if $rows != 8 then
return -1
endi
sql select t1,count(*) from sta group by t1 slimit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 limit 1;
if $rows != 10 then
return -1
endi
sql select f1,count(*) from sta group by f1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 limit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 limit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,count(*) from sta group by t1 order by t1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,count(*) from sta group by t1 order by t1 slimit 1;
if $rows != 8 then
return -1
endi
sql select f1,count(*) from sta group by f1 order by f1 limit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 order by f1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 order by t1,f1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 order by t1,f1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 order by f1,t1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 order by f1,t1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,count(*) from sta group by t1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
......@@ -11,45 +11,42 @@
# -*- coding: utf-8 -*-
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import socket
import subprocess
import random
import socket
import string
import random
import subprocess
import sys
import time
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from util.log import *
from util.sql import *
import taos
from util.cases import *
from util.common import *
from util.dnodes import *
from util.dnodes import TDDnode, TDDnodes
from util.log import *
from util.sql import *
from util.sqlset import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
#
# -------------- util --------------------------
#
def pathSize(path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for i in filenames:
#use join to concatenate all the components of path
# use join to concatenate all the components of path
f = os.path.join(dirpath, i)
#use getsize to generate size in bytes and add it to the total size
# use getsize to generate size in bytes and add it to the total size
total_size += os.path.getsize(f)
#print(dirpath)
# print(dirpath)
print(" %s %.02f MB"%(path, total_size/1024/1024))
print(" %s %.02f MB" % (path, total_size/1024/1024))
return total_size
'''
total = 0
with os.scandir(path) as it:
......@@ -67,24 +64,27 @@ def pathSize(path):
# --------------- cluster ------------------------
#
class MyDnodes(TDDnodes):
def __init__(self ,dnodes_lists):
super(MyDnodes,self).__init__()
def __init__(self, dnodes_lists):
super(MyDnodes, self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TagCluster:
noConn = True
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
self.depoly_cluster(5)
self.master_dnode = self.TDDnodes.dnodes[0]
self.host=self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
self.host = self.master_dnode.cfgDict["fqdn"]
conn1 = taos.connect(
self.master_dnode.cfgDict["fqdn"], config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
......@@ -101,8 +101,7 @@ class TagCluster:
break
return buildPath
def depoly_cluster(self ,dnodes_nums):
def depoly_cluster(self, dnodes_nums):
testCluster = False
valgrind = 0
......@@ -126,7 +125,7 @@ class TagCluster:
self.TDDnodes.setAsan(tdDnodes.getAsan())
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.deploy(dnode.index,{})
self.TDDnodes.deploy(dnode.index, {})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
......@@ -136,7 +135,8 @@ class TagCluster:
sql = ""
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
dnode_id = dnode.cfgDict["fqdn"] + \
":" + dnode.cfgDict["serverPort"]
if dnode_first_host == "":
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
......@@ -145,18 +145,17 @@ class TagCluster:
cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s "
cmd += f'"{sql}"'
print(cmd)
os.system(cmd)
os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster done! ")
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
tdLog.info(" create cluster ok.")
......@@ -168,22 +167,22 @@ class TagCluster:
class PerfDB:
def __init__(self):
self.sqls = []
self.spends= []
self.spends = []
# execute
def execute(self, sql):
print(f" perfdb execute {sql}")
print(f" perfdb execute {sql}")
stime = time.time()
ret = tdSql.execute(sql, 1)
spend = time.time() - stime
self.sqls.append(sql)
self.spends.append(spend)
return ret
# query
def query(self, sql):
print(f" perfdb query {sql}")
print(f" perfdb query {sql}")
start = time.time()
ret = tdSql.query(sql, None, 1)
spend = time.time() - start
......@@ -203,9 +202,9 @@ class TDTestCase:
self.tagCluster = TagCluster()
self.tagCluster.init(conn, logSql, replicaVar)
self.lenBinary = 64
self.lenNchar = 32
# column
self.lenNchar = 32
# column
self.column_dict = {
'ts': 'timestamp',
'col1': 'tinyint',
......@@ -252,14 +251,14 @@ class TDTestCase:
# query
def query(self, sql):
return self.dbs[self.cur].query(sql)
def set_stb_sql(self,stbname,column_dict,tag_dict):
return self.dbs[self.cur].query(sql)
def set_stb_sql(self, stbname, column_dict, tag_dict):
column_sql = ''
tag_sql = ''
for k,v in column_dict.items():
for k, v in column_dict.items():
column_sql += f"{k} {v}, "
for k,v in tag_dict.items():
for k, v in tag_dict.items():
tag_sql += f"{k} {v}, "
create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})'
return create_stb_sql
......@@ -268,37 +267,41 @@ class TDTestCase:
def create_database(self, dbname, vgroups, replica):
sql = f'create database {dbname} vgroups {vgroups} replica {replica}'
tdSql.execute(sql)
#tdSql.execute(sql)
# tdSql.execute(sql)
tdSql.execute(f'use {dbname}')
# create stable and child tables
def create_table(self, stbname, tbname, count):
# create stable
create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict)
create_table_sql = self.set_stb_sql(
stbname, self.column_dict, self.tag_dict)
tdSql.execute(create_table_sql)
# create child table
tdLog.info(f" start create {count} child tables.")
for i in range(count):
ti = i % 128
binTxt = self.random_string(self.lenBinary)
tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"{binTxt}","nch{i}",now'
sql = f'create table {tbname}{i} using {stbname} tags({tags})'
tdSql.execute(sql)
if i > 0 and i % 1000 == 0:
tdLog.info(f" child table count = {i}")
batchSql = ""
batchSize = 5000
for i in range(int(count/batchSize)):
batchSql = "create table"
for j in range(batchSize):
ti = (i * batchSize + j) % 128
binTxt = self.random_string(self.lenBinary)
idx = i * batchSize + j
tags = f'{ti},{ti},{idx},{idx},{ti},{ti},{idx},{idx},{idx}.000{idx},{idx}.000{idx},true,"{binTxt}","nch{idx}",now'
sql = f'{tbname}{idx} using {stbname} tags({tags})'
batchSql = batchSql + " " + sql
tdSql.execute(batchSql)
tdLog.info(f" child table count = {i * batchSize}")
tdLog.info(f" end create {count} child tables.")
# create stable and child tables
def create_tagidx(self, stbname):
cnt = -1
for key in self.tag_dict.keys():
# first tag have default index, so skip
if cnt == -1:
cnt = 0
continue;
continue
sql = f'create index idx_{key} on {stbname} ({key})'
tdLog.info(f" sql={sql}")
tdSql.execute(sql)
......@@ -309,11 +312,11 @@ class TDTestCase:
def insert_data(self, tbname):
# d1 insert 3 rows
for i in range(3):
sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# d20 insert 4
for i in range(4):
sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# check show indexs
......@@ -376,17 +379,17 @@ class TDTestCase:
self.query(sql)
tdSql.checkRows(4)
# drop child table
def drop_tables(self, tbname, count):
# table d1 and d20 have verify data , so can not drop
start = random.randint(21, count/2)
end = random.randint(count/2 + 1, count - 1)
end = random.randint(count/2 + 1, count - 1)
for i in range(start, end):
sql = f'drop table {tbname}{i}'
tdSql.execute(sql)
cnt = end - start + 1
tdLog.info(f' drop table from {start} to {end} count={cnt}')
cnt = end - start + 1
tdLog.info(f' drop table from {start} to {end} count={cnt}')
# drop tag index
def drop_tagidx(self, dbname, stbname):
......@@ -396,11 +399,11 @@ class TDTestCase:
# first tag have default index, so skip
if cnt == -1:
cnt = 0
continue;
continue
sql = f'drop index idx_{key}'
tdSql.execute(sql)
cnt += 1
# check idx result is 0
sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"'
tdSql.query(sql)
......@@ -408,17 +411,19 @@ class TDTestCase:
tdLog.info(f' drop {cnt} tag indexs ok.')
# show performance
def show_performance(self, count) :
db = self.dbs[0]
def show_performance(self, count):
db = self.dbs[0]
db1 = self.dbs[1]
cnt = len(db.sqls)
cnt1 = len(db1.sqls)
if cnt != len(db1.sqls):
tdLog.info(f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
if cnt != len(db1.sqls):
tdLog.info(
f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
return False
tdLog.info(f" database sql cnt ={cnt}")
print(f" ----------------- performance (child tables = {count})--------------------")
print(
f" ----------------- performance (child tables = {count})--------------------")
print(" No time(index) time(no-index) diff(col3-col2) rate(col2/col3) sql")
for i in range(cnt):
key = db.sqls[i]
......@@ -427,12 +432,13 @@ class TDTestCase:
value1 = db1.spends[i]
diff = value1 - value
rate = value/value1*100
print(" %d %.3fs %.3fs %.3fs %d%% %s"%(i+1, value, value1, diff, rate, key))
print(" %d %.3fs %.3fs %.3fs %d%% %s" % (
i+1, value, value1, diff, rate, key))
print(" --------------------- end ------------------------")
return True
return True
def show_diskspace(self):
#calc
# calc
selfPath = os.path.dirname(os.path.realpath(__file__))
projPath = ""
if ("community" in selfPath):
......@@ -451,43 +457,41 @@ class TDTestCase:
idx_size = vnode2_size + vnode3_size
noidx_size = vnode4_size + vnode5_size
print(" index = %.02f M"%(idx_size/1024/1024))
print(" no-index = %.02f M"%(noidx_size/1024/1024))
print(" index/no-index = %.2f multiple"%(idx_size/noidx_size))
print(" index = %.02f M" % (idx_size/1024/1024))
print(" no-index = %.02f M" % (noidx_size/1024/1024))
print(" index/no-index = %.2f multiple" % (idx_size/noidx_size))
print(" -------------------- end ------------------------")
# main
def testdb(self, dbname, stable, tbname, count, createidx):
# cur
if createidx:
self.cur = 0
else :
else:
self.cur = 1
# do
# do
self.create_database(dbname, 2, 1)
self.create_table(stable, tbname, count)
if(createidx):
self.create_tagidx(stable)
if (createidx):
self.create_tagidx(stable)
self.insert_data(tbname)
if(createidx):
self.show_tagidx(dbname,stable)
if (createidx):
self.show_tagidx(dbname, stable)
self.query_tagidx(stable)
#self.drop_tables(tbname, count)
#if(createidx):
# self.drop_tables(tbname, count)
# if(createidx):
# self.drop_tagidx(dbname, stable)
# query after delete , expect no crash
#self.query_tagidx(stable)
# self.query_tagidx(stable)
tdSql.execute(f'flush database {dbname}')
# run
def run(self):
self.tagCluster.run()
# var
dbname = "tagindex"
dbname1 = dbname + "1"
......@@ -511,10 +515,10 @@ class TDTestCase:
self.show_diskspace()
def stop(self):
self.tagCluster.stop()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tdCases.addLinux(__file__, TDTestCase())
......@@ -171,6 +171,7 @@ class TDTestCase:
if any(parm in condition.lower().strip() for parm in condition_exception):
print(f"case in {line}: ", end='')
print(f"condition : {condition}: ", end='')
return tdSql.error(self.sample_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
table_expr=table_expr, condition=condition
......@@ -391,16 +392,6 @@ class TDTestCase:
self.checksample(**case25)
case26 = {"k": 1000}
self.checksample(**case26)
case27 = {
"table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 "
}
self.checksample(**case27) # with slimit
case28 = {
"table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 soffset 1"
}
self.checksample(**case28) # with soffset
pass
......