Commit 21ef5e55 authored by Haojun Liao

Merge branch 'main' into fix/liaohj

@@ -248,11 +248,11 @@ The NULLS clause specifies where NULL values appear in the sorted output. NULLS LAST is the default for ascending
 LIMIT controls the number of output rows, and OFFSET specifies how many rows to skip before output begins. LIMIT/OFFSET is applied to the result set after ORDER BY. LIMIT 5 OFFSET 2 can be abbreviated as LIMIT 2, 5; both return rows 3 through 7.
-When a PARTITION BY clause is present, LIMIT controls the output within each partition rather than the total result set.
+When a PARTITION BY/GROUP BY clause is present, LIMIT controls the output within each partition rather than the total result set.
 ## SLIMIT
-SLIMIT is used together with the PARTITION BY clause to control the number of partitions in the output. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both return partitions 3 through 7.
+SLIMIT is used together with the PARTITION BY/GROUP BY clause to control the number of partitions in the output. SLIMIT 5 SOFFSET 2 can be abbreviated as SLIMIT 2, 5; both return partitions 3 through 7.
 Note that if an ORDER BY clause is present, the output contains only one partition.
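To make the revised semantics concrete, here is a hedged SQL illustration (the super table `meters` and its tag `location` are hypothetical, not part of this commit; the expected behaviors mirror the slimit_limit.sim cases added below):

```sql
-- Hypothetical schema, for illustration only:
-- CREATE STABLE meters (ts TIMESTAMP, current FLOAT) TAGS (location BINARY(64));

-- LIMIT caps rows within each group, so every location still appears,
-- each contributing at most one row here.
SELECT location, COUNT(*) FROM meters GROUP BY location LIMIT 1;

-- SLIMIT caps the number of groups: only one location is returned.
SELECT location, COUNT(*) FROM meters GROUP BY location SLIMIT 1;

-- SLIMIT 5 SOFFSET 2 (equivalently SLIMIT 2, 5) returns groups 3 through 7.
SELECT location, COUNT(*) FROM meters GROUP BY location SLIMIT 2, 5;

-- With ORDER BY the result collapses to a single partition, so SLIMIT
-- no longer trims groups; the new test script below exercises exactly this.
SELECT location, COUNT(*) FROM meters GROUP BY location ORDER BY location SLIMIT 1;
```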
......
@@ -642,7 +642,7 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_PAR_INCORRECT_NUM_OF_COL            TAOS_DEF_ERROR_CODE(0, 0x2634)
 #define TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL         TAOS_DEF_ERROR_CODE(0, 0x2635)
 #define TSDB_CODE_PAR_OFFSET_LESS_ZERO                TAOS_DEF_ERROR_CODE(0, 0x2637)
-#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY        TAOS_DEF_ERROR_CODE(0, 0x2638)
+#define TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY  TAOS_DEF_ERROR_CODE(0, 0x2638)
 #define TSDB_CODE_PAR_INVALID_TOPIC_QUERY             TAOS_DEF_ERROR_CODE(0, 0x2639)
 #define TSDB_CODE_PAR_INVALID_DROP_STABLE             TAOS_DEF_ERROR_CODE(0, 0x263A)
 #define TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE         TAOS_DEF_ERROR_CODE(0, 0x263B)
......
@@ -750,7 +750,7 @@ void clearResultRowInitFlag(SqlFunctionCtx* pCtx, int32_t numOfOutput);
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
                                    int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
-                                   bool isIntervalQuery, SAggSupporter* pSup);
+                                   bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup);
 // operator creater functions
 // clang-format off
 SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
......
@@ -195,9 +195,12 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, i
  */
 SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
                                    int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
-                                   bool isIntervalQuery, SAggSupporter* pSup) {
+                                   bool isIntervalQuery, SAggSupporter* pSup, bool keepGroup) {
   SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
+  if (!keepGroup) {
+    *(uint64_t*)pSup->keyBuf = calcGroupId(pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+  }
   SResultRowPosition* p1 =
       (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
@@ -1034,7 +1037,7 @@ void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uin
   int32_t* rowEntryInfoOffset = pOperator->exprSupp.rowEntryInfoOffset;
   SResultRow* pResultRow = doSetResultOutBufByKey(pAggInfo->aggSup.pResultBuf, pResultRowInfo, (char*)&groupId,
-                                                  sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup);
+                                                  sizeof(groupId), true, groupId, pTaskInfo, false, &pAggInfo->aggSup, true);
   /*
    * not assign result buffer yet, add new result buffer
    * all group belong to one result set, and each group result has different group id so set the id to be one
......
@@ -277,6 +277,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
terrno = TSDB_CODE_SUCCESS;
int32_t num = 0;
uint64_t groupId = 0;
for (int32_t j = 0; j < pBlock->info.rows; ++j) {
// Compare with the previous row of this column, and do not set the output buffer again if they are identical.
if (!pInfo->isInit) {
@@ -473,6 +474,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
initResultRowInfo(&pInfo->binfo.resultRowInfo);
setOperatorInfo(pOperator, "GroupbyAggOperator", 0, true, OP_NOT_OPENED, pInfo, pTaskInfo);
pInfo->binfo.mergeResultBlock = pAggNode->mergeDataBlock;
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, hashGroupbyAggregate, NULL, destroyGroupOperatorInfo,
optrDefaultBufFn, NULL);
code = appendDownstream(pOperator, &downstream, 1);
@@ -917,7 +920,7 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
   SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
   SResultRow* pResultRow =
-      doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup);
+      doSetResultOutBufByKey(pBuf, pResultRowInfo, (char*)pData, bytes, true, groupId, pTaskInfo, false, pAggSup, false);
   assert(pResultRow != NULL);
   setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
......
@@ -580,7 +580,7 @@ void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SA
   int64_t     tid = 0;
   int64_t     groupId = 0;
   SResultRow* pRow = doSetResultOutBufByKey(pSup->pResultBuf, pResultRowInfo, (char*)&tid, sizeof(tid), true, groupId,
-                                            pTaskInfo, false, pSup);
+                                            pTaskInfo, false, pSup, true);
   for (int32_t i = 0; i < numOfExprs; ++i) {
     struct SResultRowEntryInfo* pEntry = getResultEntryInfo(pRow, i, rowEntryInfoOffset);
......
@@ -78,7 +78,7 @@ static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindo
                                       int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
                                       SExecTaskInfo* pTaskInfo) {
   SResultRow* pResultRow = doSetResultOutBufByKey(pAggSup->pResultBuf, pResultRowInfo, (char*)&win->skey, TSDB_KEYSIZE,
-                                                  masterscan, tableGroupId, pTaskInfo, true, pAggSup);
+                                                  masterscan, tableGroupId, pTaskInfo, true, pAggSup, true);
   if (pResultRow == NULL) {
     *pResult = NULL;
......
@@ -3372,8 +3372,8 @@ static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) {
     return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_OFFSET_LESS_ZERO);
   }
-  if (NULL != pSelect->pSlimit && NULL == pSelect->pPartitionByList) {
-    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY);
+  if (NULL != pSelect->pSlimit && (NULL == pSelect->pPartitionByList && NULL == pSelect->pGroupByList)) {
+    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY);
   }
   return TSDB_CODE_SUCCESS;
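Read together with the error-code hunks below, the relaxed check accepts and rejects queries as sketched here (same hypothetical `meters`/`location` schema as above):

```sql
-- Still rejected: SLIMIT with neither PARTITION BY nor GROUP BY fails with
-- "slimit/soffset only available for PARTITION/GROUP BY query".
SELECT COUNT(*) FROM meters SLIMIT 1;

-- Accepted both before and after this change:
SELECT location, COUNT(*) FROM meters PARTITION BY location SLIMIT 1;

-- Newly accepted: a GROUP BY clause now satisfies the SLIMIT requirement.
SELECT location, COUNT(*) FROM meters GROUP BY location SLIMIT 1;
```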
......
@@ -103,8 +103,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
       return "Incorrect TIMESTAMP value: %s";
     case TSDB_CODE_PAR_OFFSET_LESS_ZERO:
       return "soffset/offset can not be less than 0";
-    case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY:
-      return "slimit/soffset only available for PARTITION BY query";
+    case TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY:
+      return "slimit/soffset only available for PARTITION/GROUP BY query";
     case TSDB_CODE_PAR_INVALID_TOPIC_QUERY:
       return "Invalid topic query";
     case TSDB_CODE_PAR_INVALID_DROP_STABLE:
......
@@ -515,7 +515,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_ONLY_ONE_JSON_TAG,        "Only one tag if ther
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_NUM_OF_COL,     "Query block has incorrect number of result columns")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INCORRECT_TIMESTAMP_VAL,  "Incorrect TIMESTAMP value")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_OFFSET_LESS_ZERO,         "soffset/offset can not be less than 0")
-TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_BY, "slimit/soffset only available for PARTITION BY query")
+TAOS_DEFINE_ERROR(TSDB_CODE_PAR_SLIMIT_LEAK_PARTITION_GROUP_BY, "slimit/soffset only available for PARTITION/GROUP BY query")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TOPIC_QUERY,      "Invalid topic query")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_DROP_STABLE,      "Cannot drop super table in batch")
 TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE,  "Start(end) time of query range required or time range too large")
......
@@ -168,6 +168,7 @@
 ,,y,script,./test.sh -f tsim/parser/union.sim
 ,,y,script,./test.sh -f tsim/parser/union_sysinfo.sim
 ,,y,script,./test.sh -f tsim/parser/where.sim
+,,y,script,./test.sh -f tsim/parser/slimit_limit.sim
 ,,y,script,./test.sh -f tsim/query/tagLikeFilter.sim
 ,,y,script,./test.sh -f tsim/query/charScalarFunction.sim
 ,,y,script,./test.sh -f tsim/query/explain.sim
......
@@ -415,12 +415,12 @@ if $data03 != 0 then
   return -1
 endi
-sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 1;
+sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 slimit 1;
 if $rows != 1 then
   return -1
 endi
-sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 limit 20 offset 9990;
+sql select count(*),first(ts),last(ts),min(c3) from group_tb1 group by c4 slimit 20 soffset 9990;
 if $rows != 10 then
   return -1
 endi
......
tsim/parser/slimit_limit.sim (new test script added by this commit):
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists db1;
sql create database db1 vgroups 1;
sql use db1;
sql create stable sta (ts timestamp, f1 int, f2 binary(200)) tags(t1 int, t2 int, t3 int);
sql create table tba1 using sta tags(1, 1, 1);
sql create table tba2 using sta tags(2, 2, 2);
sql create table tba3 using sta tags(3, 3, 3);
sql create table tba4 using sta tags(4, 4, 4);
sql create table tba5 using sta tags(5, 5, 5);
sql create table tba6 using sta tags(6, 6, 6);
sql create table tba7 using sta tags(7, 7, 7);
sql create table tba8 using sta tags(8, 8, 8);
sql create index index1 on sta (t2);
sql insert into tba1 values ('2022-04-26 15:15:01', 1, "a");
sql insert into tba1 values ('2022-04-26 15:15:02', 11, "a");
sql insert into tba2 values ('2022-04-26 15:15:01', 2, "a");
sql insert into tba2 values ('2022-04-26 15:15:02', 22, "a");
sql insert into tba3 values ('2022-04-26 15:15:01', 3, "a");
sql insert into tba4 values ('2022-04-26 15:15:01', 4, "a");
sql insert into tba5 values ('2022-04-26 15:15:01', 5, "a");
sql insert into tba6 values ('2022-04-26 15:15:01', 6, "a");
sql insert into tba7 values ('2022-04-26 15:15:01', 7, "a");
sql insert into tba8 values ('2022-04-26 15:15:01', 8, "a");
sql select t1,count(*) from sta group by t1 limit 1;
if $rows != 8 then
return -1
endi
sql select t1,count(*) from sta group by t1 slimit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 limit 1;
if $rows != 10 then
return -1
endi
sql select f1,count(*) from sta group by f1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 limit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 limit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 slimit 1;
if $rows != 1 then
return -1
endi
sql select t1,count(*) from sta group by t1 order by t1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,count(*) from sta group by t1 order by t1 slimit 1;
if $rows != 8 then
return -1
endi
sql select f1,count(*) from sta group by f1 order by f1 limit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 order by f1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 order by t1,f1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 order by t1,f1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 order by f1,t1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 order by f1,t1 slimit 1;
if $rows != 10 then
return -1
endi
sql select t1,count(*) from sta group by t1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select f1,count(*) from sta group by f1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by t1, f1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
sql select t1,f1,count(*) from sta group by f1, t1 slimit 1 limit 1;
if $rows != 1 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
@@ -11,45 +11,42 @@
# -*- coding: utf-8 -*-
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
import taos
import sys
import time
import os
import socket
import subprocess
import random
import socket
import string
import random
import subprocess
import sys
import time
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
from util.log import *
from util.sql import *
import taos
from util.cases import *
from util.common import *
from util.dnodes import *
from util.dnodes import TDDnode, TDDnodes
from util.log import *
from util.sql import *
from util.sqlset import *
from util.dnodes import *
from util.dnodes import TDDnodes
from util.dnodes import TDDnode
#
# -------------- util --------------------------
#
def pathSize(path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for i in filenames:
-            #use join to concatenate all the components of path
+            # use join to concatenate all the components of path
             f = os.path.join(dirpath, i)
-            #use getsize to generate size in bytes and add it to the total size
+            # use getsize to generate size in bytes and add it to the total size
             total_size += os.path.getsize(f)
-            #print(dirpath)
+            # print(dirpath)
-    print(" %s %.02f MB"%(path, total_size/1024/1024))
+    print(" %s %.02f MB" % (path, total_size/1024/1024))
return total_size
'''
total = 0
with os.scandir(path) as it:
@@ -67,24 +64,27 @@ def pathSize(path):
# --------------- cluster ------------------------
#
class MyDnodes(TDDnodes):
-    def __init__(self ,dnodes_lists):
-        super(MyDnodes,self).__init__()
+    def __init__(self, dnodes_lists):
+        super(MyDnodes, self).__init__()
self.dnodes = dnodes_lists # dnode must be TDDnode instance
self.simDeployed = False
class TagCluster:
noConn = True
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
self.depoly_cluster(5)
self.master_dnode = self.TDDnodes.dnodes[0]
-        self.host=self.master_dnode.cfgDict["fqdn"]
-        conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir)
+        self.host = self.master_dnode.cfgDict["fqdn"]
+        conn1 = taos.connect(
+            self.master_dnode.cfgDict["fqdn"], config=self.master_dnode.cfgDir)
tdSql.init(conn1.cursor())
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
@@ -101,8 +101,7 @@ class TagCluster:
break
return buildPath
-    def depoly_cluster(self ,dnodes_nums):
+    def depoly_cluster(self, dnodes_nums):
testCluster = False
valgrind = 0
......@@ -126,7 +125,7 @@ class TagCluster:
self.TDDnodes.setAsan(tdDnodes.getAsan())
self.TDDnodes.stopAll()
for dnode in self.TDDnodes.dnodes:
-            self.TDDnodes.deploy(dnode.index,{})
+            self.TDDnodes.deploy(dnode.index, {})
for dnode in self.TDDnodes.dnodes:
self.TDDnodes.starttaosd(dnode.index)
@@ -136,7 +135,8 @@
sql = ""
for dnode in self.TDDnodes.dnodes[1:]:
# print(dnode.cfgDict)
-            dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"]
+            dnode_id = dnode.cfgDict["fqdn"] + \
+                ":" + dnode.cfgDict["serverPort"]
if dnode_first_host == "":
dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
@@ -145,18 +145,17 @@
cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s "
cmd += f'"{sql}"'
print(cmd)
-        os.system(cmd)
+        os.system(cmd)
time.sleep(2)
tdLog.info(" create cluster done! ")
def getConnection(self, dnode):
host = dnode.cfgDict["fqdn"]
port = dnode.cfgDict["serverPort"]
config_dir = dnode.cfgDir
return taos.connect(host=host, port=int(port), config=config_dir)
def run(self):
tdLog.info(" create cluster ok.")
@@ -168,22 +167,22 @@
class PerfDB:
def __init__(self):
self.sqls = []
-        self.spends= []
+        self.spends = []
# execute
def execute(self, sql):
print(f" perfdb execute {sql}")
print(f" perfdb execute {sql}")
stime = time.time()
ret = tdSql.execute(sql, 1)
spend = time.time() - stime
self.sqls.append(sql)
self.spends.append(spend)
return ret
# query
def query(self, sql):
print(f" perfdb query {sql}")
print(f" perfdb query {sql}")
start = time.time()
ret = tdSql.query(sql, None, 1)
spend = time.time() - start
@@ -203,9 +202,9 @@ class TDTestCase:
self.tagCluster = TagCluster()
self.tagCluster.init(conn, logSql, replicaVar)
self.lenBinary = 64
-        self.lenNchar = 32
-        # column
+        self.lenNchar = 32
+        # column
self.column_dict = {
'ts': 'timestamp',
'col1': 'tinyint',
@@ -252,14 +251,14 @@
# query
def query(self, sql):
-        return self.dbs[self.cur].query(sql)
-    def set_stb_sql(self,stbname,column_dict,tag_dict):
+        return self.dbs[self.cur].query(sql)
+    def set_stb_sql(self, stbname, column_dict, tag_dict):
column_sql = ''
tag_sql = ''
-        for k,v in column_dict.items():
+        for k, v in column_dict.items():
             column_sql += f"{k} {v}, "
-        for k,v in tag_dict.items():
+        for k, v in tag_dict.items():
             tag_sql += f"{k} {v}, "
create_stb_sql = f'create stable {stbname} ({column_sql[:-2]}) tags ({tag_sql[:-2]})'
return create_stb_sql
@@ -268,37 +267,41 @@
def create_database(self, dbname, vgroups, replica):
sql = f'create database {dbname} vgroups {vgroups} replica {replica}'
tdSql.execute(sql)
-        #tdSql.execute(sql)
+        # tdSql.execute(sql)
tdSql.execute(f'use {dbname}')
# create stable and child tables
def create_table(self, stbname, tbname, count):
# create stable
-        create_table_sql = self.set_stb_sql(stbname, self.column_dict, self.tag_dict)
+        create_table_sql = self.set_stb_sql(
+            stbname, self.column_dict, self.tag_dict)
tdSql.execute(create_table_sql)
# create child table
tdLog.info(f" start create {count} child tables.")
-        for i in range(count):
-            ti = i % 128
-            binTxt = self.random_string(self.lenBinary)
-            tags = f'{ti},{ti},{i},{i},{ti},{ti},{i},{i},{i}.000{i},{i}.000{i},true,"{binTxt}","nch{i}",now'
-            sql = f'create table {tbname}{i} using {stbname} tags({tags})'
-            tdSql.execute(sql)
-            if i > 0 and i % 1000 == 0:
-                tdLog.info(f" child table count = {i}")
+        batchSql = ""
+        batchSize = 5000
+        for i in range(int(count/batchSize)):
+            batchSql = "create table"
+            for j in range(batchSize):
+                ti = (i * batchSize + j) % 128
+                binTxt = self.random_string(self.lenBinary)
+                idx = i * batchSize + j
+                tags = f'{ti},{ti},{idx},{idx},{ti},{ti},{idx},{idx},{idx}.000{idx},{idx}.000{idx},true,"{binTxt}","nch{idx}",now'
+                sql = f'{tbname}{idx} using {stbname} tags({tags})'
+                batchSql = batchSql + " " + sql
+            tdSql.execute(batchSql)
+            tdLog.info(f" child table count = {i * batchSize}")
tdLog.info(f" end create {count} child tables.")
# create stable and child tables
def create_tagidx(self, stbname):
cnt = -1
for key in self.tag_dict.keys():
# first tag have default index, so skip
if cnt == -1:
cnt = 0
-                continue;
+                continue
sql = f'create index idx_{key} on {stbname} ({key})'
tdLog.info(f" sql={sql}")
tdSql.execute(sql)
@@ -309,11 +312,11 @@
def insert_data(self, tbname):
# d1 insert 3 rows
for i in range(3):
-            sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
+            sql = f'insert into {tbname}1(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# d20 insert 4
for i in range(4):
-            sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
+            sql = f'insert into {tbname}20(ts,col1) values(now+{i}s,{i});'
tdSql.execute(sql)
# check show indexs
@@ -376,17 +379,17 @@
self.query(sql)
tdSql.checkRows(4)
# drop child table
def drop_tables(self, tbname, count):
# table d1 and d20 have verify data , so can not drop
start = random.randint(21, count/2)
-        end = random.randint(count/2 + 1, count - 1)
+        end = random.randint(count/2 + 1, count - 1)
for i in range(start, end):
sql = f'drop table {tbname}{i}'
tdSql.execute(sql)
-        cnt = end - start + 1
-        tdLog.info(f' drop table from {start} to {end} count={cnt}')
+        cnt = end - start + 1
+        tdLog.info(f' drop table from {start} to {end} count={cnt}')
# drop tag index
def drop_tagidx(self, dbname, stbname):
@@ -396,11 +399,11 @@
# first tag have default index, so skip
if cnt == -1:
cnt = 0
-            continue;
+            continue
sql = f'drop index idx_{key}'
tdSql.execute(sql)
cnt += 1
# check idx result is 0
sql = f'select index_name,column_name from information_schema.ins_indexes where db_name="{dbname}"'
tdSql.query(sql)
@@ -408,17 +411,19 @@
tdLog.info(f' drop {cnt} tag indexs ok.')
# show performance
-    def show_performance(self, count) :
-        db = self.dbs[0]
+    def show_performance(self, count):
+        db = self.dbs[0]
db1 = self.dbs[1]
cnt = len(db.sqls)
cnt1 = len(db1.sqls)
-        if cnt != len(db1.sqls):
-            tdLog.info(f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
+        if cnt != len(db1.sqls):
+            tdLog.info(
+                f" datebase sql count not equal. cnt={cnt} cnt1={cnt1}\n")
return False
tdLog.info(f" database sql cnt ={cnt}")
print(f" ----------------- performance (child tables = {count})--------------------")
print(
f" ----------------- performance (child tables = {count})--------------------")
print(" No time(index) time(no-index) diff(col3-col2) rate(col2/col3) sql")
for i in range(cnt):
key = db.sqls[i]
@@ -427,12 +432,13 @@
value1 = db1.spends[i]
diff = value1 - value
rate = value/value1*100
print(" %d %.3fs %.3fs %.3fs %d%% %s"%(i+1, value, value1, diff, rate, key))
print(" %d %.3fs %.3fs %.3fs %d%% %s" % (
i+1, value, value1, diff, rate, key))
print(" --------------------- end ------------------------")
-        return True
+        return True
def show_diskspace(self):
-        #calc
+        # calc
selfPath = os.path.dirname(os.path.realpath(__file__))
projPath = ""
if ("community" in selfPath):
@@ -451,43 +457,41 @@
idx_size = vnode2_size + vnode3_size
noidx_size = vnode4_size + vnode5_size
print(" index = %.02f M"%(idx_size/1024/1024))
print(" no-index = %.02f M"%(noidx_size/1024/1024))
print(" index/no-index = %.2f multiple"%(idx_size/noidx_size))
print(" index = %.02f M" % (idx_size/1024/1024))
print(" no-index = %.02f M" % (noidx_size/1024/1024))
print(" index/no-index = %.2f multiple" % (idx_size/noidx_size))
print(" -------------------- end ------------------------")
# main
def testdb(self, dbname, stable, tbname, count, createidx):
# cur
if createidx:
self.cur = 0
-        else :
+        else:
self.cur = 1
-        # do
+        # do
self.create_database(dbname, 2, 1)
self.create_table(stable, tbname, count)
-        if(createidx):
-            self.create_tagidx(stable)
+        if (createidx):
+            self.create_tagidx(stable)
         self.insert_data(tbname)
-        if(createidx):
-            self.show_tagidx(dbname,stable)
+        if (createidx):
+            self.show_tagidx(dbname, stable)
self.query_tagidx(stable)
-        #self.drop_tables(tbname, count)
-        #if(createidx):
+        # self.drop_tables(tbname, count)
+        # if(createidx):
         #    self.drop_tagidx(dbname, stable)
         # query after delete , expect no crash
-        #self.query_tagidx(stable)
+        # self.query_tagidx(stable)
tdSql.execute(f'flush database {dbname}')
# run
def run(self):
self.tagCluster.run()
# var
dbname = "tagindex"
dbname1 = dbname + "1"
@@ -511,10 +515,10 @@
self.show_diskspace()
def stop(self):
self.tagCluster.stop()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addLinux(__file__, TDTestCase())
@@ -171,6 +171,7 @@
if any(parm in condition.lower().strip() for parm in condition_exception):
print(f"case in {line}: ", end='')
print(f"condition : {condition}: ", end='')
return tdSql.error(self.sample_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
table_expr=table_expr, condition=condition
@@ -391,16 +392,6 @@
         self.checksample(**case25)
         case26 = {"k": 1000}
         self.checksample(**case26)
-        case27 = {
-            "table_expr": f"{DBNAME}.stb1",
-            "condition": "group by tbname slimit 1 "
-        }
-        self.checksample(**case27)  # with slimit
-        case28 = {
-            "table_expr": f"{DBNAME}.stb1",
-            "condition": "group by tbname slimit 1 soffset 1"
-        }
-        self.checksample(**case28)  # with soffset
pass
......