Commit 1d323fce authored by Z zhaoyanggh

Merge branch 'master' of https://github.com/taosdata/TDengine into fix/TS-165

......@@ -36,7 +36,7 @@ extern "C" {
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_CHILD_TABLE))
#define UTIL_TABLE_IS_NORMAL_TABLE(metaInfo) \
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo) || UTIL_TABLE_IS_TMP_TABLE(metaInfo)))
(!(UTIL_TABLE_IS_SUPER_TABLE(metaInfo) || UTIL_TABLE_IS_CHILD_TABLE(metaInfo)))
#define UTIL_TABLE_IS_TMP_TABLE(metaInfo) \
(((metaInfo)->pTableMeta != NULL) && ((metaInfo)->pTableMeta->tableType == TSDB_TEMP_TABLE))
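The two adjacent definitions of UTIL_TABLE_IS_NORMAL_TABLE above differ only in whether UTIL_TABLE_IS_TMP_TABLE takes part in the check. A minimal standalone sketch of the two predicates (simplified stand-in constants, not TDengine's real STableMetaInfo):

#include <stdbool.h>
enum { T_SUPER, T_CHILD, T_TEMP, T_PLAIN };  /* stand-ins for TSDB_*_TABLE */
static bool isNormalWithTmpCheck(int t)    { return !(t == T_SUPER || t == T_CHILD || t == T_TEMP); }
static bool isNormalWithoutTmpCheck(int t) { return !(t == T_SUPER || t == T_CHILD); }
/* isNormalWithTmpCheck(T_TEMP) == false, but isNormalWithoutTmpCheck(T_TEMP) == true */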
......
......@@ -168,6 +168,9 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo
} else {
pRes->code = numOfRows;
}
if (pRes->code == TSDB_CODE_SUCCESS) {
// this is the error handling path: never report a success code to the error callback
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
}
tscAsyncResultOnError(pSql);
return;
......
......@@ -35,6 +35,7 @@ typedef struct SCompareParam {
static bool needToMerge(SSDataBlock* pBlock, SArray* columnIndexList, int32_t index, char **buf) {
int32_t ret = 0;
size_t size = taosArrayGetSize(columnIndexList);
if (size > 0) {
ret = compare_aRv(pBlock, columnIndexList, (int32_t) size, index, buf, TSDB_ORDER_ASC);
......@@ -564,9 +565,11 @@ static void savePrevOrderColumns(char** prevRow, SArray* pColumnList, SSDataBloc
(*hasPrev) = true;
}
// the tsdb_func_tag function only produces one row of result; therefore, we need to copy the
// output value to multiple rows
static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput, int32_t numOfRows) {
if (numOfRows <= 1) {
return ;
return;
}
for (int32_t k = 0; k < numOfOutput; ++k) {
......@@ -574,12 +577,49 @@ static void setTagValueForMultipleRows(SQLFunctionCtx* pCtx, int32_t numOfOutput
continue;
}
int32_t inc = numOfRows - 1; // the tsdb_func_tag function only produces one row of result
char* src = pCtx[k].pOutput;
char* src = pCtx[k].pOutput;
char* dst = pCtx[k].pOutput + pCtx[k].outputBytes;
for (int32_t i = 0; i < inc; ++i) {
pCtx[k].pOutput += pCtx[k].outputBytes;
memcpy(pCtx[k].pOutput, src, (size_t)pCtx[k].outputBytes);
// Start from the second row, since the first row already holds the result value.
for (int32_t i = 1; i < numOfRows; ++i) {
memcpy(dst, src, (size_t)pCtx[k].outputBytes);
dst += pCtx[k].outputBytes;
}
}
}
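A standalone sketch of the copy pattern above (fanOutFirstRow is a hypothetical name, not part of the patch): one result row of width rowBytes is replicated into the next numOfRows - 1 slots, starting from the second row:

#include <string.h>
static void fanOutFirstRow(char* buf, size_t rowBytes, int numOfRows) {
  char* dst = buf + rowBytes;            /* row 0 already holds the result */
  for (int i = 1; i < numOfRows; ++i) {
    memcpy(dst, buf, rowBytes);
    dst += rowBytes;
  }
}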
static void doMergeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr, int32_t rowIndex, char** pDataPtr) {
for (int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pInput = pDataPtr[j] + pCtx[j].inputBytes * rowIndex;
}
for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
} else {
aAggs[functionId].mergeFunc(&pCtx[j]);
}
}
}
static void doFinalizeResultImpl(SMultiwayMergeInfo* pInfo, SQLFunctionCtx *pCtx, int32_t numOfExpr) {
for(int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
} else {
aAggs[functionId].xFinalize(&pCtx[j]);
}
}
}
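Both helpers above resolve a negative functionId to a slot in the udfInfo array with the same arithmetic; a one-line sketch of that mapping (udfSlot is a hypothetical name):

/* functionId -1 maps to slot 0, -2 to slot 1, and so on */
static inline int udfSlot(int functionId) { return -1 * functionId - 1; }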
......@@ -588,52 +628,18 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
SMultiwayMergeInfo* pInfo = pOperator->info;
SQLFunctionCtx* pCtx = pInfo->binfo.pCtx;
char** add = calloc(pBlock->info.numOfCols, POINTER_BYTES);
char** addrPtr = calloc(pBlock->info.numOfCols, POINTER_BYTES);
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
add[i] = pCtx[i].pInput;
addrPtr[i] = pCtx[i].pInput;
pCtx[i].size = 1;
}
for(int32_t i = 0; i < pBlock->info.rows; ++i) {
if (pInfo->hasPrev) {
if (needToMerge(pBlock, pInfo->orderColumnList, i, pInfo->prevRow)) {
for (int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
}
for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
continue;
}
aAggs[functionId].mergeFunc(&pCtx[j]);
}
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
} else {
for(int32_t j = 0; j < numOfExpr; ++j) { // TODO refactor
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
continue;
}
aAggs[functionId].xFinalize(&pCtx[j]);
}
doFinalizeResultImpl(pInfo, pCtx, numOfExpr);
int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput);
setTagValueForMultipleRows(pCtx, pOperator->numOfOutput, numOfRows);
......@@ -655,48 +661,10 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
aAggs[pCtx[j].functionId].init(&pCtx[j], pCtx[j].resultInfo);
}
for (int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
}
for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
continue;
}
aAggs[functionId].mergeFunc(&pCtx[j]);
}
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
}
} else {
for (int32_t j = 0; j < numOfExpr; ++j) {
pCtx[j].pInput = add[j] + pCtx[j].inputBytes * i;
}
for (int32_t j = 0; j < numOfExpr; ++j) {
int32_t functionId = pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
continue;
}
aAggs[functionId].mergeFunc(&pCtx[j]);
}
doMergeResultImpl(pInfo, pCtx, numOfExpr, i, addrPtr);
}
savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, i, &pInfo->hasPrev);
......@@ -704,11 +672,11 @@ static void doExecuteFinalMerge(SOperatorInfo* pOperator, int32_t numOfExpr, SSD
{
for(int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
pCtx[i].pInput = add[i];
pCtx[i].pInput = addrPtr[i];
}
}
tfree(add);
tfree(addrPtr);
}
static bool isAllSourcesCompleted(SGlobalMerger *pMerger) {
......@@ -816,6 +784,8 @@ SSDataBlock* doMultiwayMergeSort(void* param, bool* newgroup) {
SLocalDataSource *pOneDataSrc = pMerger->pLocalDataSrc[pTree->pNode[0].index];
bool sameGroup = true;
if (pInfo->hasPrev) {
// todo refactor extract method
int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
// if this row belongs to current result set group
......@@ -955,9 +925,10 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
break;
}
bool sameGroup = true;
if (pAggInfo->hasGroupColData) {
bool sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
if (!sameGroup) {
sameGroup = isSameGroup(pAggInfo->groupColumnList, pBlock, pAggInfo->currentGroupColData);
if (!sameGroup && !pAggInfo->multiGroupResults) {
*newgroup = true;
pAggInfo->hasDataBlockForNewGroup = true;
pAggInfo->pExistBlock = pBlock;
......@@ -976,26 +947,11 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
}
if (handleData) { // data in current group is all handled
for(int32_t j = 0; j < pOperator->numOfOutput; ++j) {
int32_t functionId = pAggInfo->binfo.pCtx[j].functionId;
if (functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId < 0) {
SUdfInfo* pUdfInfo = taosArrayGet(pAggInfo->udfInfo, -1 * functionId - 1);
doInvokeUdf(pUdfInfo, &pAggInfo->binfo.pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
continue;
}
aAggs[functionId].xFinalize(&pAggInfo->binfo.pCtx[j]);
}
doFinalizeResultImpl(pAggInfo, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
int32_t numOfRows = getNumOfResult(pOperator->pRuntimeEnv, pAggInfo->binfo.pCtx, pOperator->numOfOutput);
pAggInfo->binfo.pRes->info.rows += numOfRows;
pAggInfo->binfo.pRes->info.rows += numOfRows;
setTagValueForMultipleRows(pAggInfo->binfo.pCtx, pOperator->numOfOutput, numOfRows);
}
......@@ -1019,71 +975,127 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
return (pRes->info.rows != 0)? pRes:NULL;
}
static SSDataBlock* skipGroupBlock(SOperatorInfo* pOperator, bool* newgroup) {
SSLimitOperatorInfo *pInfo = pOperator->info;
assert(pInfo->currentGroupOffset >= 0);
static void doHandleDataInCurrentGroup(SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock, int32_t rowIndex) {
if (pInfo->currentOffset > 0) {
pInfo->currentOffset -= 1;
} else {
// discard the data rows in current group
if (pInfo->limit.limit < 0 || (pInfo->limit.limit >= 0 && pInfo->rowsTotal < pInfo->limit.limit)) {
size_t num1 = taosArrayGetSize(pInfo->pRes->pDataBlock);
for (int32_t i = 0; i < num1; ++i) {
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData *pDstInfoData = taosArrayGet(pInfo->pRes->pDataBlock, i);
SSDataBlock* pBlock = NULL;
if (pInfo->currentGroupOffset == 0) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
}
SColumnInfo *pColInfo = &pColInfoData->info;
char *pSrc = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
char *pDst = (char *)pDstInfoData->pData + (pInfo->pRes->info.rows * pColInfo->bytes);
if (*newgroup == false && pInfo->limit.limit > 0 && pInfo->rowsTotal >= pInfo->limit.limit) {
while ((*newgroup) == false) { // ignore the remaining blocks
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
memcpy(pDst, pSrc, pColInfo->bytes);
}
pInfo->rowsTotal += 1;
pInfo->pRes->info.rows += 1;
}
}
}
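The per-row admission rule that doHandleDataInCurrentGroup implements can be summarized in a standalone sketch (admitRow is a hypothetical helper; limit < 0 means "no limit", matching the check above): consume offset rows first, then accept rows until the limit is reached:

#include <stdint.h>
#include <stdbool.h>
static bool admitRow(int64_t* currentOffset, int64_t* rowsTotal, int64_t limit) {
  if (*currentOffset > 0) { (*currentOffset) -= 1; return false; }  /* still skipping */
  if (limit >= 0 && *rowsTotal >= limit) return false;              /* limit reached  */
  (*rowsTotal) += 1;                                                /* row accepted   */
  return true;
}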
static void ensureOutputBuf(SSLimitOperatorInfo * pInfo, SSDataBlock *pResultBlock, int32_t numOfRows) {
if (pInfo->capacity < pResultBlock->info.rows + numOfRows) {
int32_t total = pResultBlock->info.rows + numOfRows;
size_t num = taosArrayGetSize(pResultBlock->pDataBlock);
for (int32_t i = 0; i < num; ++i) {
SColumnInfoData *pInfoData = taosArrayGet(pResultBlock->pDataBlock, i);
char *tmp = realloc(pInfoData->pData, total * pInfoData->info.bytes);
if (tmp != NULL) {
pInfoData->pData = tmp;
} else {
// todo handle the malloc failure
}
return pBlock;
pInfo->capacity = total;
pInfo->threshold = (int64_t) (total * 0.8); // emit a batch once pRes grows past 80% of capacity (see doSLimit)
}
}
}
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
enum {
BLOCK_NEW_GROUP = 1,
BLOCK_NO_GROUP = 2,
BLOCK_SAME_GROUP = 3,
};
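/* Return-code contract of doSlimitImpl, as read from the logic below:
 *   BLOCK_NEW_GROUP  - a new group started while results already exist; the caller
 *                      (doSLimit) stashes the block in pPrevBlock and emits pRes first
 *   BLOCK_NO_GROUP   - the slimit group count is exhausted; stop pulling blocks
 *   BLOCK_SAME_GROUP - all rows of the block were consumed within the current group
 */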
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
static int32_t doSlimitImpl(SOperatorInfo* pOperator, SSLimitOperatorInfo* pInfo, SSDataBlock* pBlock) {
int32_t rowIndex = 0;
while (rowIndex < pBlock->info.rows) {
int32_t numOfCols = (int32_t)taosArrayGetSize(pInfo->orderColumnList);
bool samegroup = true;
if (pInfo->hasPrev) {
for (int32_t i = 0; i < numOfCols; ++i) {
SColIndex *pIndex = taosArrayGet(pInfo->orderColumnList, i);
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, pIndex->colIndex);
SColumnInfo *pColInfo = &pColInfoData->info;
char *d = rowIndex * pColInfo->bytes + (char *)pColInfoData->pData;
int32_t ret = columnValueAscendingComparator(pInfo->prevRow[i], d, pColInfo->type, pColInfo->bytes);
if (ret != 0) { // it is a new group
samegroup = false;
break;
}
}
}
while(1) {
if (*newgroup) {
pInfo->currentGroupOffset -= 1;
*newgroup = false;
if (!samegroup || !pInfo->hasPrev) {
pInfo->ignoreCurrentGroup = false;
savePrevOrderColumns(pInfo->prevRow, pInfo->orderColumnList, pBlock, rowIndex, &pInfo->hasPrev);
pInfo->currentOffset = pInfo->limit.offset; // reset the offset value for a new group
pInfo->rowsTotal = 0;
if (pInfo->currentGroupOffset > 0) {
pInfo->ignoreCurrentGroup = true;
pInfo->currentGroupOffset -= 1; // we are now in the next group's data
rowIndex += 1;
continue;
}
// A new group has arrived according to the result rows, and the group limitation has already been reached.
// Jump out of the current loop and return immediately.
if (pInfo->slimit.limit >= 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return BLOCK_NO_GROUP;
}
while ((*newgroup) == false) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
pInfo->groupTotal += 1;
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
// Mixing data from different groups is not allowed: return if the current row does not belong
// to the previous group and there are already results in the current SSDataBlock.
if (!pInfo->multigroupResult && !samegroup && pInfo->pRes->info.rows > 0) {
return BLOCK_NEW_GROUP;
}
// now we have got the first data block of the next group.
if (pInfo->currentGroupOffset == 0) {
return pBlock;
doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
} else { // handle the offset in the same group
// All the data in the current group needs to be discarded, due to the group offset (slimit) in the SQL statement
if (pInfo->ignoreCurrentGroup) {
rowIndex += 1;
continue;
}
doHandleDataInCurrentGroup(pInfo, pBlock, rowIndex);
}
return NULL;
rowIndex += 1;
}
return BLOCK_SAME_GROUP;
}
SSDataBlock* doSLimit(void* param, bool* newgroup) {
......@@ -1093,63 +1105,41 @@ SSDataBlock* doSLimit(void* param, bool* newgroup) {
}
SSLimitOperatorInfo *pInfo = pOperator->info;
pInfo->pRes->info.rows = 0;
SSDataBlock *pBlock = NULL;
while (1) {
pBlock = skipGroupBlock(pOperator, newgroup);
if (pBlock == NULL) {
setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
pOperator->status = OP_EXEC_DONE;
return NULL;
}
if (*newgroup) { // a new group arrives
pInfo->groupTotal += 1;
pInfo->rowsTotal = 0;
pInfo->currentOffset = pInfo->limit.offset;
}
if (pInfo->pPrevBlock != NULL) {
ensureOutputBuf(pInfo, pInfo->pRes, pInfo->pPrevBlock->info.rows);
int32_t ret = doSlimitImpl(pOperator, pInfo, pInfo->pPrevBlock);
assert(ret != BLOCK_NEW_GROUP);
assert(pInfo->currentGroupOffset == 0);
if (pInfo->currentOffset >= pBlock->info.rows) {
pInfo->currentOffset -= pBlock->info.rows;
} else {
if (pInfo->currentOffset == 0) {
break;
}
int32_t remain = (int32_t)(pBlock->info.rows - pInfo->currentOffset);
pBlock->info.rows = remain;
pInfo->pPrevBlock = NULL;
}
// move the remaining rows of this data block to the front.
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SColumnInfoData *pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
assert(pInfo->currentGroupOffset >= 0);
int16_t bytes = pColInfoData->info.bytes;
memmove(pColInfoData->pData, pColInfoData->pData + bytes * pInfo->currentOffset, remain * bytes);
}
while(1) {
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_BEFORE_OPERATOR_EXEC);
SSDataBlock *pBlock = pOperator->upstream[0]->exec(pOperator->upstream[0], newgroup);
publishOperatorProfEvent(pOperator->upstream[0], QUERY_PROF_AFTER_OPERATOR_EXEC);
pInfo->currentOffset = 0;
break;
if (pBlock == NULL) {
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}
}
if (pInfo->slimit.limit > 0 && pInfo->groupTotal > pInfo->slimit.limit) { // reach the group limit, abort
return NULL;
}
if (pInfo->limit.limit > 0 && (pInfo->rowsTotal + pBlock->info.rows >= pInfo->limit.limit)) {
pBlock->info.rows = (int32_t)(pInfo->limit.limit - pInfo->rowsTotal);
pInfo->rowsTotal = pInfo->limit.limit;
ensureOutputBuf(pInfo, pInfo->pRes, pBlock->info.rows);
int32_t ret = doSlimitImpl(pOperator, pInfo, pBlock);
if (ret == BLOCK_NEW_GROUP) {
pInfo->pPrevBlock = pBlock;
return pInfo->pRes;
}
if (pInfo->slimit.limit > 0 && pInfo->groupTotal >= pInfo->slimit.limit) {
pOperator->status = OP_EXEC_DONE;
if (pOperator->status == OP_EXEC_DONE) {
return pInfo->pRes->info.rows == 0 ? NULL : pInfo->pRes;
}
// setQueryStatus(pOperator->pRuntimeEnv, QUERY_COMPLETED);
} else {
pInfo->rowsTotal += pBlock->info.rows;
// the number of rows in the current group is now enough; return to the caller
if (pInfo->pRes->info.rows > pInfo->threshold) {
return pInfo->pRes;
}
}
return pBlock;
}
......@@ -1758,6 +1758,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int32_t numOfRow
pSql->res.numOfRows = 0;
code = doPackSendDataBlock(pSql, pInsertParam, pTableMeta, count, pTableDataBlock);
if (code != TSDB_CODE_SUCCESS) {
pParentSql->res.code = code;
goto _error;
}
......
......@@ -422,7 +422,6 @@ int32_t readFromFile(char *name, uint32_t *len, void **buf) {
return TSDB_CODE_TSC_APP_ERROR;
}
close(fd);
tfree(*buf);
return TSDB_CODE_SUCCESS;
}
......@@ -926,7 +925,6 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
pQueryInfo = pCmd->active;
pQueryInfo->pUdfInfo = pUdfInfo;
pQueryInfo->udfCopy = true;
}
}
......@@ -6917,7 +6915,6 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
const char* msg1 = "interval not allowed in group by normal column";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
SSchema* tagSchema = NULL;
......@@ -8439,6 +8436,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
if (taosArrayGetSize(subInfo->pSubquery) >= 2) {
return invalidOperationMsg(msgBuf, "not support union in subquery");
}
SQueryInfo* pSub = calloc(1, sizeof(SQueryInfo));
tscInitQueryInfo(pSub);
......@@ -8461,6 +8459,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SSqlObj* pS
if (pTableMetaInfo1 == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
pTableMetaInfo1->pTableMeta = extractTempTableMetaFromSubquery(pSub);
pTableMetaInfo1->tableMetaCapacity = tscGetTableMetaSize(pTableMetaInfo1->pTableMeta);
......@@ -8544,7 +8543,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
// check if there is a 3-level select
SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i);
SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0);
if (p->from->type == SQL_NODE_FROM_SUBQUERY){
if (p->from->type == SQL_NODE_FROM_SUBQUERY) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9);
}
......@@ -8636,6 +8635,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
}
}
// disable mixed-up group results if an interval/session window query exists.
if (isTimeWindowQuery(pQueryInfo)) {
size_t num = taosArrayGetSize(pQueryInfo->pUpstream);
for(int32_t i = 0; i < num; ++i) {
SQueryInfo* pUp = taosArrayGetP(pQueryInfo->pUpstream, i);
pUp->multigroupResult = false;
}
}
// parse the HAVING clause first
int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1);
if (validateHavingClause(pQueryInfo, pSqlNode->pHaving, pCmd, pSqlNode->pSelNodeList, joinQuery, timeWindowQuery) !=
......
......@@ -2265,7 +2265,7 @@ void tscFirstRoundCallback(void* param, TAOS_RES* tres, int code) {
destroySup(pSup);
taos_free_result(pSql);
parent->res.code = code;
parent->res.code = c;
tscAsyncResultOnError(parent);
return;
}
......
......@@ -3078,6 +3078,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->slimit.offset = 0;
pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->window = TSWINDOW_INITIALIZER;
pQueryInfo->multigroupResult = true;
}
int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
......@@ -3089,7 +3090,6 @@ int32_t tscAddQueryInfo(SSqlCmd* pCmd) {
}
tscInitQueryInfo(pQueryInfo);
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
if (pCmd->pQueryInfo == NULL) {
......@@ -3171,6 +3171,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->window = pSrc->window;
pQueryInfo->sessionWindow = pSrc->sessionWindow;
pQueryInfo->pTableMetaInfo = NULL;
pQueryInfo->multigroupResult = pSrc->multigroupResult;
pQueryInfo->bufLen = pSrc->bufLen;
pQueryInfo->orderProjectQuery = pSrc->orderProjectQuery;
......@@ -3570,7 +3571,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
pNewQueryInfo->pTableMetaInfo = NULL;
pNewQueryInfo->bufLen = pQueryInfo->bufLen;
pNewQueryInfo->buf = malloc(pQueryInfo->bufLen);
pNewQueryInfo->multigroupResult = pQueryInfo->multigroupResult;
pNewQueryInfo->distinct = pQueryInfo->distinct;
if (pNewQueryInfo->buf == NULL) {
......@@ -3795,12 +3796,12 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
if (code && !((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && pParentSql->retry < pParentSql->maxRetry)) {
pParentSql->res.code = code;
tscAsyncResultOnError(pParentSql);
return;
}
tscFreeSubobj(pParentSql);
tscFreeSubobj(pParentSql);
tfree(pParentSql->pSubs);
pParentSql->res.code = TSDB_CODE_SUCCESS;
......@@ -3809,9 +3810,9 @@ static void tscSubqueryCompleteCallback(void* param, TAOS_RES* tres, int code) {
tscDebug("0x%"PRIx64" retry parse sql and send query, prev error: %s, retry:%d", pParentSql->self,
tstrerror(code), pParentSql->retry);
tscResetSqlCmd(&pParentSql->cmd, true);
code = tsParseSql(pParentSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
return;
......@@ -3880,7 +3881,7 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
pNew->maxRetry = pSql->maxRetry;
pNew->cmd.resColumnId = TSDB_RES_COL_ID;
tsem_init(&pNew->rspSem, 0, 0);
SRetrieveSupport* ps = calloc(1, sizeof(SRetrieveSupport)); // todo use object id
......@@ -4416,7 +4417,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
size_t sz = 0;
STableMeta* pChild = *ppChild;
STableMeta* pChild1;
taosHashGetCloneExt(tscTableMetaMap, pChild->sTableName, strnlen(pChild->sTableName, TSDB_TABLE_FNAME_LEN), NULL, (void **)&p, &sz);
// tableMeta exists, build child table meta according to the super table meta
......@@ -4427,9 +4428,9 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta** ppChild, const char* name,
int32_t tableMetaSize = sizeof(STableMeta) + totalBytes;
if (*tableMetaCapacity < tableMetaSize) {
pChild1 = realloc(pChild, tableMetaSize);
if(pChild1 == NULL)
if(pChild1 == NULL)
return -1;
pChild = pChild1;
pChild = pChild1;
*tableMetaCapacity = (size_t)tableMetaSize;
}
......@@ -4701,6 +4702,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->distinct = pQueryInfo->distinct;
pQueryAttr->sw = pQueryInfo->sessionWindow;
pQueryAttr->stateWindow = pQueryInfo->stateWindow;
pQueryAttr->multigroupResult = pQueryInfo->multigroupResult;
pQueryAttr->numOfCols = numOfCols;
pQueryAttr->numOfOutput = numOfOutput;
......
......@@ -84,7 +84,7 @@ int8_t tsTscEnableRecordSql = 0;
// the maximum number of results for a projection query on a super table returned from
// one virtual node, ordered by timestamp
int32_t tsMaxNumOfOrderedResults = 100000;
int32_t tsMaxNumOfOrderedResults = 1000000;
// 10 ms sliding time by default; the value changes if the time precision changes
int32_t tsMinSlidingTime = 10;
......@@ -1017,8 +1017,8 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsMaxNumOfOrderedResults;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MAX_SQL_LEN;
cfg.maxValue = TSDB_MAX_ALLOWED_SQL_LEN;
cfg.minValue = 100000;
cfg.maxValue = 100000000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
......
......@@ -102,7 +102,7 @@ extern char configDir[];
#define NOTE_BUFF_LEN (SMALL_BUFF_LEN*16)
#define DEFAULT_TIMESTAMP_STEP 1
#define DEFAULT_CHILDTABLES 10000
enum TEST_MODE {
INSERT_TEST, // 0
......@@ -625,7 +625,7 @@ SArguments g_args = {
0, // interlace_rows;
30000, // num_of_RPR
(1024*1024), // max_sql_len
10000, // num_of_tables
DEFAULT_CHILDTABLES, // num_of_tables
10000, // num_of_DPT
0, // abort
0, // disorderRatio
......@@ -639,7 +639,7 @@ SArguments g_args = {
static SDbs g_Dbs;
static int64_t g_totalChildTables = 0;
static int64_t g_totalChildTables = DEFAULT_CHILDTABLES;
static int64_t g_actualChildTables = 0;
static SQueryMetaInfo g_queryInfo;
static FILE * g_fpOfInsertResult = NULL;
......@@ -9154,6 +9154,7 @@ int main(int argc, char *argv[]) {
debugPrint("meta file: %s\n", g_args.metaFile);
if (g_args.metaFile) {
g_totalChildTables = 0;
initOfInsertMeta();
initOfQueryMeta();
......
......@@ -219,6 +219,7 @@ typedef struct SQueryAttr {
bool distinct; // distinct query or not
bool stateWindow; // window State on sub/normal table
bool createFilterOperator; // if filter operator is needed
bool multigroupResult; // results of multiple groups can exist in one SSDataBlock
int32_t interBufSize; // intermediate buffer size
int32_t havingNum; // having expr number
......@@ -460,16 +461,23 @@ typedef struct SLimitOperatorInfo {
} SLimitOperatorInfo;
typedef struct SSLimitOperatorInfo {
int64_t groupTotal;
int64_t currentGroupOffset;
int64_t rowsTotal;
int64_t currentOffset;
SLimitVal limit;
SLimitVal slimit;
char **prevRow;
SArray *orderColumnList;
int64_t groupTotal;
int64_t currentGroupOffset;
int64_t rowsTotal;
int64_t currentOffset;
SLimitVal limit;
SLimitVal slimit;
char **prevRow;
SArray *orderColumnList;
bool hasPrev;
bool ignoreCurrentGroup;
bool multigroupResult;
SSDataBlock *pRes; // result buffer
SSDataBlock *pPrevBlock;
int64_t capacity;
int64_t threshold;
} SSLimitOperatorInfo;
typedef struct SFilterOperatorInfo {
......@@ -481,8 +489,9 @@ typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;
int64_t totalInputRows;
void **p;
SSDataBlock *existNewGroupBlock;
bool multigroupResult;
} SFillOperatorInfo;
typedef struct SGroupbyOperatorInfo {
......@@ -544,9 +553,9 @@ typedef struct SMultiwayMergeInfo {
bool hasDataBlockForNewGroup;
SSDataBlock *pExistBlock;
bool hasPrev;
bool groupMix;
SArray *udfInfo;
bool hasPrev;
bool multiGroupResults;
} SMultiwayMergeInfo;
// todo support the disk-based sort
......@@ -568,7 +577,7 @@ SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult);
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
......@@ -577,10 +586,10 @@ SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInf
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
SOperatorInfo* createMultiwaySortOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput,
int32_t numOfRows, void* merger, bool groupMix);
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo);
int32_t numOfRows, void* merger);
SOperatorInfo* createGlobalAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* param, SArray* pUdfInfo, bool groupResultMixedUp);
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger);
SOperatorInfo* createSLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, void* merger, bool multigroupResult);
SOperatorInfo* createFilterOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr,
int32_t numOfOutput, SColumnInfo* pCols, int32_t numOfFilter);
......
......@@ -148,6 +148,7 @@ typedef struct SQueryInfo {
bool orderProjectQuery;
bool stateWindow;
bool globalMerge;
bool multigroupResult;
} SQueryInfo;
/**
......
This diff is collapsed.
......@@ -430,7 +430,7 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput)
SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i);
pFillInfo->pData[i] = pColData->pData;
if (TSDB_COL_IS_TAG(pCol->flag)/* || IS_VAR_DATA_TYPE(pCol->col.type)*/) { // copy the tag value to tag value buffer
if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
assert (pTag->col.colId == pCol->col.colId);
memcpy(pTag->tagVal, pColData->pData, pCol->col.bytes); // TODO not memcpy??
......
......@@ -375,6 +375,16 @@ STSBlock* readDataFromDisk(STSBuf* pTSBuf, int32_t order, bool decomp) {
sz = fread(pBlock->payload, (size_t)pBlock->compLen, 1, pTSBuf->f);
if (decomp) {
if (pBlock->numOfElem * TSDB_KEYSIZE > pTSBuf->tsData.allocSize) {
pTSBuf->tsData.rawBuf = realloc(pTSBuf->tsData.rawBuf, pBlock->numOfElem * TSDB_KEYSIZE);
pTSBuf->tsData.allocSize = pBlock->numOfElem * TSDB_KEYSIZE;
}
if (pBlock->numOfElem * TSDB_KEYSIZE > pTSBuf->bufSize) {
pTSBuf->assistBuf = realloc(pTSBuf->assistBuf, pBlock->numOfElem * TSDB_KEYSIZE);
pTSBuf->bufSize = pBlock->numOfElem * TSDB_KEYSIZE;
}
pTSBuf->tsData.len =
tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf,
pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize);
......@@ -471,7 +481,7 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t id, tVariant* tag, const char* pData, i
// the size of the raw data may exceed the size of the default prepared buffer, so
// during getBufBlock the output buffer needs to be large enough.
if (ptsData->len >= ptsData->threshold) {
if (ptsData->len >= ptsData->threshold - TSDB_KEYSIZE) {
writeDataToDisk(pTSBuf);
shrinkBuffer(ptsData);
}
......@@ -603,6 +613,10 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t groupIndex, int32_t blockIndex
expandBuffer(&pTSBuf->tsData, (int32_t)s);
}
if (s > pTSBuf->bufSize) {
pTSBuf->assistBuf = realloc(pTSBuf->assistBuf, s);
pTSBuf->bufSize = (int32_t)s;
}
pTSBuf->tsData.len =
tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf,
pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize);
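Both hunks above grow a buffer only when the decompressed payload would overflow it (note that the patch assigns realloc's result back directly, so an allocation failure is not handled there). A standalone sketch of the grow-if-needed pattern (growIfNeeded is a hypothetical helper):

#include <stdint.h>
#include <stdlib.h>
static void* growIfNeeded(void* buf, int32_t* capacity, int32_t needed) {
  if (needed <= *capacity) return buf;
  void* p = realloc(buf, (size_t)needed);
  if (p != NULL) *capacity = needed;   /* on failure the old buffer survives */
  return (p != NULL) ? p : buf;
}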
......
......@@ -15,6 +15,7 @@ import sys
import subprocess
import random
import math
import numpy as np
from util.log import *
from util.cases import *
......@@ -57,16 +58,33 @@ class TDTestCase:
def td3690(self):
tdLog.printNoPrefix("==========TD-3690==========")
tdSql.prepare()
tdSql.execute("show variables")
res_off = tdSql.cursor.fetchall()
resList = np.array(res_off)
index = np.where(resList == "offlineThreshold")
index_value = np.dstack((index[0])).squeeze()
tdSql.query("show variables")
tdSql.checkData(53, 1, 864000)
tdSql.checkData(index_value, 1, 864000)
def td4082(self):
tdLog.printNoPrefix("==========TD-4082==========")
tdSql.prepare()
cfgfile = self.getCfgFile()
max_compressMsgSize = 100000000
tdSql.execute("show variables")
res_com = tdSql.cursor.fetchall()
rescomlist = np.array(res_com)
cpms_index = np.where(rescomlist == "compressMsgSize")
index_value = np.dstack((cpms_index[0])).squeeze()
tdSql.query("show variables")
tdSql.checkData(26, 1, -1)
tdSql.checkData(index_value, 1, -1)
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
......@@ -80,7 +98,7 @@ class TDTestCase:
tdDnodes.start(index)
tdSql.query("show variables")
tdSql.checkData(26, 1, 100000000)
tdSql.checkData(index_value, 1, 100000000)
tdDnodes.stop(index)
cmd = f"sed -i '$s/{max_compressMsgSize}/{max_compressMsgSize+10}/g' {cfgfile} "
......@@ -91,7 +109,7 @@ class TDTestCase:
tdDnodes.start(index)
tdSql.query("show variables")
tdSql.checkData(26, 1, -1)
tdSql.checkData(index_value, 1, -1)
tdDnodes.stop(index)
cmd = f"sed -i '$d' {cfgfile}"
......@@ -104,8 +122,12 @@ class TDTestCase:
def td4097(self):
tdLog.printNoPrefix("==========TD-4097==========")
tdSql.execute("drop database if exists db")
tdSql.execute("drop database if exists db1")
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("create database if not exists db keep 3650")
tdSql.execute("create database if not exists db1 keep 3650")
tdSql.execute("create database if not exists new keep 3650")
......@@ -267,10 +289,22 @@ class TDTestCase:
# keep ~ [days,365000]
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db")
tdSql.execute("show variables")
res_kp = tdSql.cursor.fetchall()
resList = np.array(res_kp)
keep_index = np.where(resList == "keep")
index_value = np.dstack((keep_index[0])).squeeze()
tdSql.query("show variables")
tdSql.checkData(38, 1, 3650)
tdSql.checkData(index_value, 1, 3650)
tdSql.query("show databases")
tdSql.checkData(0,7,"3650,3650,3650")
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
tdSql.checkData(0, 7, "3650,3650,3650")
else:
tdSql.checkData(0, 7, 3650)
days = tdSql.getData(0, 6)
tdSql.error("alter database db keep 3650001")
......@@ -289,14 +323,22 @@ class TDTestCase:
tdSql.execute("alter database db keep 36500")
tdSql.query("show databases")
tdSql.checkData(0, 7, "3650,3650,36500")
if ("community" in selfPath):
tdSql.checkData(0, 7, "36500,36500,36500")
else:
tdSql.checkData(0, 7, 36500)
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db1")
tdSql.query("show databases")
tdSql.checkData(0, 7, "3650,3650,3650")
if ("community" in selfPath):
tdSql.checkData(0, 7, "3650,3650,3650")
else:
tdSql.checkData(0, 7, 3650)
tdSql.query("show variables")
tdSql.checkData(38, 1, 3650)
tdSql.checkData(index_value, 1, 3650)
tdSql.execute("alter database db1 keep 365")
tdSql.execute("drop database if exists db1")
......@@ -697,10 +739,8 @@ class TDTestCase:
tdSql.checkRows(tbnum*3)
tdSql.query(f"select distinct c1 c2, c2 c3 from t1 where c1 <{tbnum}")
tdSql.checkRows(3)
tdSql.query("select distinct c1, c2 from stb1 order by ts")
tdSql.checkRows(tbnum*3+1)
tdSql.query("select distinct c1, c2 from t1 order by ts")
tdSql.checkRows(4)
tdSql.error("select distinct c1, c2 from stb1 order by ts")
tdSql.error("select distinct c1, c2 from t1 order by ts")
tdSql.error("select distinct c1, ts from stb1 group by c2")
tdSql.error("select distinct c1, ts from t1 group by c2")
tdSql.error("select distinct c1, max(c2) from stb1 ")
......@@ -975,6 +1015,84 @@ class TDTestCase:
tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
tdSql.error("select ts as t, top(c1, 0) from t1")
tdSql.query("select ts as t, top(c1, 5) from t1")
tdSql.checkRows(5)
tdSql.checkCols(3)
for i in range(5):
data=tdSql.getData(i, 0)
tdSql.checkData(i, 1, data)
tdSql.query("select ts as t, top(c1, 5) from stb1")
tdSql.checkRows(5)
tdSql.query("select ts as t, top(c1, 5) from stb1 group by tbname")
tdSql.checkRows(500)
tdSql.query("select ts as t, top(c1, 8) from t1")
tdSql.checkRows(6)
tdSql.query("select ts as t, top(c2, 8) from t1")
tdSql.checkRows(6)
tdSql.error("select ts as t, top(c3, 5) from t1")
tdSql.error("select ts as t, top(c4, 5) from t1")
tdSql.query("select ts as t, top(c5, 8) from t1")
tdSql.checkRows(6)
tdSql.error("select ts as t, top(c6, 5) from t1")
tdSql.error("select ts as t, top(c5, 8) as b from t1 order by b")
tdSql.error("select ts as t, top(t1, 1) from t1")
tdSql.error("select ts as t, top(t1, 1) from stb1")
tdSql.error("select ts as t, top(t1, 3) from stb1 order by c3")
tdSql.error("select ts as t, top(t1, 3) from t1 order by c3")
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("select ts as t, diff(c1) from t1")
tdSql.checkRows(5)
tdSql.checkCols(3)
for i in range(5):
data=tdSql.getData(i, 0)
tdSql.checkData(i, 1, data)
tdSql.query("select ts as t, diff(c1) from stb1 group by tbname")
tdSql.checkRows(500)
tdSql.checkCols(4)
tdSql.query("select ts as t, diff(c1) from t1")
tdSql.query("select ts as t, diff(c1) from t1")
tdSql.error("select ts as t, diff(c1) from stb1")
tdSql.query("select ts as t, diff(c2) from t1")
tdSql.checkRows(5)
tdSql.error("select ts as t, diff(c3) from t1")
tdSql.error("select ts as t, diff(c4) from t1")
tdSql.query("select ts as t, diff(c5) from t1")
tdSql.checkRows(5)
tdSql.error("select ts as t, diff(c6) from t1")
tdSql.error("select ts as t, diff(t1) from t1")
tdSql.error("select ts as t, diff(c1, c2) from t1")
tdSql.error("select ts as t, bottom(c1, 0) from t1")
tdSql.query("select ts as t, bottom(c1, 5) from t1")
tdSql.checkRows(5)
tdSql.checkCols(3)
for i in range(5):
data=tdSql.getData(i, 0)
tdSql.checkData(i, 1, data)
tdSql.query("select ts as t, bottom(c1, 5) from stb1")
tdSql.checkRows(5)
tdSql.query("select ts as t, bottom(c1, 5) from stb1 group by tbname")
tdSql.checkRows(500)
tdSql.query("select ts as t, bottom(c1, 8) from t1")
tdSql.checkRows(6)
tdSql.query("select ts as t, bottom(c2, 8) from t1")
tdSql.checkRows(6)
tdSql.error("select ts as t, bottom(c3, 5) from t1")
tdSql.error("select ts as t, bottom(c4, 5) from t1")
tdSql.query("select ts as t, bottom(c5, 8) from t1")
tdSql.checkRows(6)
tdSql.error("select ts as t, bottom(c6, 5) from t1")
tdSql.error("select ts as t, bottom(c5, 8) as b from t1 order by b")
tdSql.error("select ts as t, bottom(t1, 1) from t1")
tdSql.error("select ts as t, bottom(t1, 1) from stb1")
tdSql.error("select ts as t, bottom(t1, 3) from stb1 order by c3")
tdSql.error("select ts as t, bottom(t1, 3) from t1 order by c3")
tdSql.error("select ts as t, top(c1, 0) from t1")
tdSql.query("select ts as t, top(c1, 5) from t1")
tdSql.checkRows(5)
......@@ -1007,9 +1125,9 @@ class TDTestCase:
def run(self):
# master branch
# self.td3690()
# self.td4082()
# self.td4288()
self.td3690()
self.td4082()
self.td4288()
# self.td4724()
# self.td5798()
# self.td5935()
......
......@@ -12,6 +12,8 @@
# -*- coding: utf-8 -*-
import sys
import numpy as np
from util.log import *
from util.cases import *
from util.sql import *
......@@ -24,8 +26,17 @@ class TDTestCase:
tdSql.init(conn.cursor(), logSql)
def run(self):
# tdSql.query("show variables")
# tdSql.checkData(54, 1, 864000)
tdSql.execute("show variables")
res = tdSql.cursor.fetchall()
resList = np.array(res)
index = np.where(resList == "offlineThreshold")
index_value = np.dstack((index[0])).squeeze()
tdSql.query("show variables")
tdSql.checkData(54, 1, 864000)
tdSql.checkData(index_value, 1, 864000)
pass
def stop(self):
tdSql.close()
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1538548685000
def run(self):
tdSql.prepare()
print("==============step1")
tdSql.execute(
"create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(50))")
tdSql.execute(
'CREATE TABLE if not exists dev_001 using st tags("dev_01")')
tdSql.execute(
'CREATE TABLE if not exists dev_002 using st tags("dev_02")')
print("==============step2")
tdSql.execute(
"""INSERT INTO dev_001(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1),
('2020-05-13 10:00:00.001', 1)
dev_002 VALUES('2020-05-13 10:00:00.001', 1)""")
tdSql.query("select * from db.st where ts='2020-05-13 10:00:00.000'")
tdSql.checkRows(1)
tdSql.query("select tbname, dev from dev_001")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'dev_001')
tdSql.checkData(0, 1, 'dev_01')
tdSql.query("select tbname, dev, tagtype from dev_001")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'dev_001')
tdSql.checkData(0, 1, 'dev_01')
tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 0, 'dev_001')
tdSql.checkData(1, 1, 'dev_01')
tdSql.checkData(1, 2, 1)
## test case for https://jira.taosdata.com:18080/browse/TD-2488
tdSql.execute("create table m1(ts timestamp, k int) tags(a int)")
tdSql.execute("create table t1 using m1 tags(1)")
tdSql.execute("create table t2 using m1 tags(2)")
tdSql.execute("insert into t1 values('2020-1-1 1:1:1', 1)")
tdSql.execute("insert into t1 values('2020-1-1 1:10:1', 2)")
tdSql.execute("insert into t2 values('2020-1-1 1:5:1', 99)")
tdSql.query("select count(*) from m1 where ts = '2020-1-1 1:5:1' ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("select count(*) from m1 where ts = '2020-1-1 1:5:1' ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)
## test case for https://jira.taosdata.com:18080/browse/TD-1930
tdSql.execute("create table tb(ts timestamp, c1 int, c2 binary(10), c3 nchar(10), c4 float, c5 bool)")
for i in range(10):
tdSql.execute("insert into tb values(%d, %d, 'binary%d', 'nchar%d', %f, %d)" % (self.ts + i, i, i, i, i + 0.1, i % 2))
tdSql.error("select * from tb where c2 = binary2")
tdSql.error("select * from tb where c3 = nchar2")
tdSql.query("select * from tb where c2 = 'binary2' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c3 = 'nchar2' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c1 = '2' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c1 = 2 ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c4 = '0.1' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c4 = 0.1 ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c5 = true ")
tdSql.checkRows(5)
tdSql.query("select * from tb where c5 = 'true' ")
tdSql.checkRows(5)
# For jira: https://jira.taosdata.com:18080/browse/TD-2850
tdSql.execute("create database 'Test' ")
tdSql.execute("use 'Test' ")
tdSql.execute("create table 'TB'(ts timestamp, 'Col1' int) tags('Tag1' int)")
tdSql.execute("insert into 'Tb0' using tb tags(1) values(now, 1)")
tdSql.query("select * from tb")
tdSql.checkRows(1)
tdSql.query("select * from tb0")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.ts = 1538548685000
def run(self):
tdSql.prepare()
print("==============step1")
tdSql.execute(
"create table if not exists st (ts timestamp, tagtype int) tags(dev nchar(50))")
tdSql.execute(
'CREATE TABLE if not exists dev_001 using st tags("dev_01")')
tdSql.execute(
'CREATE TABLE if not exists dev_002 using st tags("dev_02")')
print("==============step2")
tdSql.execute(
"""INSERT INTO dev_001(ts, tagtype) VALUES('2020-05-13 10:00:00.000', 1),
('2020-05-13 10:00:00.001', 1)
dev_002 VALUES('2020-05-13 10:00:00.001', 1)""")
tdSql.query("select * from db.st where ts='2020-05-13 10:00:00.000'")
tdSql.checkRows(1)
tdSql.query("select tbname, dev from dev_001")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'dev_001')
tdSql.checkData(0, 1, 'dev_01')
tdSql.query("select tbname, dev, tagtype from dev_001")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'dev_001')
tdSql.checkData(0, 1, 'dev_01')
tdSql.checkData(0, 2, 1)
tdSql.checkData(1, 0, 'dev_001')
tdSql.checkData(1, 1, 'dev_01')
tdSql.checkData(1, 2, 1)
## test case for https://jira.taosdata.com:18080/browse/TD-2488
tdSql.execute("create table m1(ts timestamp, k int) tags(a int)")
tdSql.execute("create table t1 using m1 tags(1)")
tdSql.execute("create table t2 using m1 tags(2)")
tdSql.execute("insert into t1 values('2020-1-1 1:1:1', 1)")
tdSql.execute("insert into t1 values('2020-1-1 1:10:1', 2)")
tdSql.execute("insert into t2 values('2020-1-1 1:5:1', 99)")
tdSql.query("select count(*) from m1 where ts = '2020-1-1 1:5:1' ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("select count(*) from m1 where ts = '2020-1-1 1:5:1' ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 1)
## test case for https://jira.taosdata.com:18080/browse/TD-1930
tdSql.execute("create table tb(ts timestamp, c1 int, c2 binary(10), c3 nchar(10), c4 float, c5 bool)")
for i in range(10):
tdSql.execute(
"insert into tb values(%d, %d, 'binary%d', 'nchar%d', %f, %d)" % (self.ts + i, i, i, i, i + 0.1, i % 2))
tdSql.error("select * from tb where c2 = binary2")
tdSql.error("select * from tb where c3 = nchar2")
tdSql.query("select * from tb where c2 = 'binary2' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c3 = 'nchar2' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c1 = '2' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c1 = 2 ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c4 = '0.1' ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c4 = 0.1 ")
tdSql.checkRows(1)
tdSql.query("select * from tb where c5 = true ")
tdSql.checkRows(5)
tdSql.query("select * from tb where c5 = 'true' ")
tdSql.checkRows(5)
# For jira: https://jira.taosdata.com:18080/browse/TD-2850
tdSql.execute("create database 'Test' ")
tdSql.execute("use 'Test' ")
tdSql.execute("create table 'TB'(ts timestamp, 'Col1' int) tags('Tag1' int)")
tdSql.execute("insert into 'Tb0' using tb tags(1) values(now, 1)")
tdSql.query("select * from tb")
tdSql.checkRows(1)
# For jira: https://jira.taosdata.com:18080/browse/TD-6314
tdSql.execute("use db")
tdSql.execute("create stable stb_001(ts timestamp,v int) tags(c0 int)")
tdSql.query("select _block_dist() from stb_001")
tdSql.checkRows(1)
tdSql.query("select * from tb0")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -17,6 +17,7 @@ import os
import taos
import time
import argparse
import json
class taosdemoQueryPerformace:
......@@ -48,7 +49,7 @@ class taosdemoQueryPerformace:
cursor2 = self.conn2.cursor()
cursor2.execute("create database if not exists %s" % self.dbName)
cursor2.execute("use %s" % self.dbName)
cursor2.execute("create table if not exists %s(ts timestamp, query_time float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName)
cursor2.execute("create table if not exists %s(ts timestamp, query_time_avg float, query_time_max float, query_time_min float, commit_id binary(50), branch binary(50), type binary(20)) tags(query_id int, query_sql binary(300))" % self.stbName)
sql = "select count(*) from test.meters"
tableid = 1
......@@ -74,7 +75,7 @@ class taosdemoQueryPerformace:
tableid = 6
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
sql = "select * from meters"
sql = "select * from meters limit 10000"
tableid = 7
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
......@@ -87,37 +88,96 @@ class taosdemoQueryPerformace:
cursor2.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql))
cursor2.close()
def generateQueryJson(self):
sqls = []
cursor2 = self.conn2.cursor()
cursor2.execute("select query_id, query_sql from %s.%s" % (self.dbName, self.stbName))
i = 0
for data in cursor2:
sql = {
"sql": data[1],
"result_mode": "onlyformat",
"result_file": "./query_sql_res%d.txt" % i
}
sqls.append(sql)
i += 1
query_data = {
"filetype": "query",
"cfgdir": "/etc/perf",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"databases": "test",
"specified_table_query": {
"query_times": 100,
"concurrent": 1,
"sqls": sqls
}
}
query_json_file = f"/tmp/query.json"
with open(query_json_file, 'w') as f:
json.dump(query_data, f)
return query_json_file
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosdemo" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def getCMDOutput(self, cmd):
cmd = os.popen(cmd)
output = cmd.read()
cmd.close()
return output
def query(self):
cursor = self.conn.cursor()
print("==================== query performance ====================")
buildPath = self.getBuildPath()
if (buildPath == ""):
print("taosdemo not found!")
sys.exit(1)
binPath = buildPath + "/build/bin/"
os.system(
"%sperfMonitor -f %s > query_res.txt" %
(binPath, self.generateQueryJson()))
cursor = self.conn2.cursor()
print("==================== query performance ====================")
cursor.execute("use %s" % self.dbName)
cursor.execute("select tbname, query_id, query_sql from %s" % self.stbName)
cursor.execute("select tbname, query_sql from %s" % self.stbName)
i = 0
for data in cursor:
table_name = data[0]
query_id = data[1]
sql = data[2]
sql = data[1]
self.avgDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $2}'" % (i + 1))
self.maxDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $5}'" % (i + 1))
self.minDelay = self.getCMDOutput("grep 'avgDelay' query_res.txt | awk 'NR==%d{print $8}'" % (i + 1))
i += 1
print("query time for: %s %f seconds" % (sql, float(self.avgDelay)))
c = self.conn2.cursor()
c.execute("insert into %s.%s values(now, %f, %f, %f, '%s', '%s', '%s')" % (self.dbName, table_name, float(self.avgDelay), float(self.maxDelay), float(self.minDelay), self.commitID, self.branch, self.type))
totalTime = 0
cursor2 = self.conn.cursor()
cursor2.execute("use test")
for i in range(100):
if(self.clearCache == True):
# root permission is required
os.system("echo 3 > /proc/sys/vm/drop_caches")
startTime = time.time()
cursor2.execute(sql)
totalTime += time.time() - startTime
cursor2.close()
print("query time for: %s %f seconds" % (sql, totalTime / 100))
cursor3 = self.conn2.cursor()
cursor3.execute("insert into %s.%s values(now, %f, '%s', '%s', '%s')" % (self.dbName, table_name, totalTime / 100, self.commitID, self.branch, self.type))
cursor3.close()
c.close()
cursor.close()
if __name__ == '__main__':
......@@ -174,4 +234,4 @@ if __name__ == '__main__':
args = parser.parse_args()
perftest = taosdemoQueryPerformace(args.remove_cache, args.commit_id, args.database_name, args.stable_name, args.table_perfix, args.git_branch, args.build_type)
perftest.createPerfTables()
perftest.query()
perftest.query()
\ No newline at end of file
......@@ -49,24 +49,18 @@ class taosdemoPerformace:
def generateJson(self):
db = {
"name": "%s" % self.insertDB,
"drop": "yes",
"replica": 1
"drop": "yes"
}
stb = {
"name": "meters",
"child_table_exists": "no",
"childtable_count": self.numOfTables,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"batch_create_tbl_num": 10,
"insert_mode": "taosc",
"insert_mode": "rand",
"insert_rows": self.numOfRows,
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"batch_rows": 1000000,
"max_sql_len": 1048576,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
......@@ -100,11 +94,8 @@ class taosdemoPerformace:
"user": "root",
"password": "taosdata",
"thread_count": 10,
"thread_count_create_tbl": 10,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 30000,
"databases": [db]
}
......@@ -145,7 +136,7 @@ class taosdemoPerformace:
binPath = buildPath + "/build/bin/"
os.system(
"%staosdemo -f %s > /dev/null 2>&1" %
"%sperfMonitor -f %s > /dev/null 2>&1" %
(binPath, self.generateJson()))
self.createTableTime = self.getCMDOutput(
"grep 'Spent' insert_res.txt | awk 'NR==1{print $2}'")
......