Commit 15d8ae0f authored by H Haojun Liao

other: merge 3.0

......@@ -174,6 +174,8 @@ cmake .. -G "NMake Makefiles"
nmake
```
If you are using Visual Studio 2022, the default installation path of the `vcvarsall.bat` script is `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`
### Mac OS X
Install the Xcode command line tools and cmake. On Catalina and Big Sur, XCode 11.4+ is required.
......
......@@ -136,7 +136,7 @@ cmake .. -DCPUTYPE=mips64 && cmake --build .
### On Windows platform
If you use the Visual Studio 2013, please open a command window by executing "cmd.exe".
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Please specify "amd64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat.
```cmd
mkdir debug && cd debug
......@@ -145,7 +145,7 @@ cmake .. -G "NMake Makefiles"
nmake
```
If you use the Visual Studio 2019 or 2017:
If you use Visual Studio 2019 or 2017:
please open a command window by executing "cmd.exe".
Please specify "x64" for 64 bits Windows or specify "x86" is for 32 bits Windows when you execute vcvarsall.bat.
......@@ -164,6 +164,8 @@ cmake .. -G "NMake Makefiles"
nmake
```
If you use Visual Studio 2022, the only change is the default path of `vcvarsall.bat`, which is `C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvarsall.bat`.
### On Mac OS X platform
Please install XCode command line tools and cmake. Verified with XCode 11.4+ on Catalina and Big Sur.
......
......@@ -118,6 +118,7 @@ execute_process(COMMAND "${CMAKE_COMMAND}" --build .
# ================================================================================================
# googletest
if(${BUILD_TEST})
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
add_subdirectory(googletest EXCLUDE_FROM_ALL)
target_include_directories(
gtest
......@@ -259,7 +260,7 @@ if(${BUILD_MSVCREGEX})
SET_TARGET_PROPERTIES(msvcregex PROPERTIES OUTPUT_NAME msvcregex)
endif(${BUILD_MSVCREGEX})
# msvcregex
# wcwidth
if(${BUILD_WCWIDTH})
add_library(wcwidth STATIC "")
target_sources(wcwidth
......
......@@ -55,8 +55,9 @@ extern int32_t tMsgDict[];
#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff)
#define TMSG_INFO(TYPE) tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)]
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
#define TMSG_INFO(TYPE) \
(((TYPE) >= 0 && (TYPE) < TDMT_MAX) ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] : 0)
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
typedef uint16_t tmsg_t;
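A minimal, self-contained sketch of what the bounds check added to `TMSG_INFO` above buys: an out-of-range message type now yields a null pointer instead of indexing past the dictionary. The toy arrays and `TOY_TDMT_MAX` below are illustrative stand-ins, not the real TDengine message tables.
```c
#include <stdint.h>
#include <stdio.h>

#define TOY_TDMT_MAX 0x0200
static const char *toyMsgInfo[] = {"connect", "heartbeat"};
static const int   toyMsgDict[] = {0, 1};

#define TOY_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TOY_SEG_SEQ(TYPE)  ((TYPE)&0xff)
#define TOY_MSG_INFO(TYPE) \
  (((TYPE) >= 0 && (TYPE) < TOY_TDMT_MAX) ? toyMsgInfo[toyMsgDict[TOY_SEG_CODE(TYPE)] + TOY_SEG_SEQ(TYPE)] : 0)

int main(void) {
  int32_t ok = 0x0100, bad = 0x7fff;  /* bad lies outside [0, TOY_TDMT_MAX) */
  printf("%s\n", TOY_MSG_INFO(ok));                                 /* "heartbeat" */
  printf("%s\n", TOY_MSG_INFO(bad) ? TOY_MSG_INFO(bad) : "(null)"); /* guarded, no out-of-bounds read */
  return 0;
}
```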
......
......@@ -82,6 +82,7 @@ typedef struct SScanLogicNode {
typedef struct SJoinLogicNode {
SLogicNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
bool isSingleTableJoin;
} SJoinLogicNode;
......@@ -329,6 +330,7 @@ typedef struct SInterpFuncPhysiNode {
typedef struct SJoinPhysiNode {
SPhysiNode node;
EJoinType joinType;
SNode* pMergeCondition;
SNode* pOnConditions;
SNodeList* pTargets;
} SJoinPhysiNode;
......
......@@ -376,6 +376,7 @@ void nodesRewriteSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeRewrit
typedef enum ECollectColType { COLLECT_COL_TYPE_COL = 1, COLLECT_COL_TYPE_TAG, COLLECT_COL_TYPE_ALL } ECollectColType;
int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char* pTableAlias, ECollectColType type,
SNodeList** pCols);
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols);
typedef bool (*FFuncClassifier)(int32_t funcId);
int32_t nodesCollectFuncs(SSelectStmt* pSelect, ESqlClause clause, FFuncClassifier classifier, SNodeList** pFuncs);
......
......@@ -274,15 +274,14 @@ static void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps) {
SDnodeEp dnodeEp = {0};
dnodeEp.id = pDnode->id;
dnodeEp.isMnode = 0;
dnodeEp.ep.port = pDnode->port;
memcpy(dnodeEp.ep.fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
sdbRelease(pSdb, pDnode);
dnodeEp.isMnode = 0;
if (mndIsMnode(pMnode, pDnode->id)) {
dnodeEp.isMnode = 1;
}
sdbRelease(pSdb, pDnode);
taosArrayPush(pDnodeEps, &dnodeEp);
}
}
......
......@@ -519,6 +519,8 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) {
}
}
syncNodeRelease(pSyncNode);
if (code != 0) {
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
}
......
......@@ -131,7 +131,7 @@ int32_t sdbSetTable(SSdb *pSdb, SSdbTable table) {
hashType = TSDB_DATA_TYPE_BINARY;
}
SHashObj *hash = taosHashInit(64, taosGetDefaultHashFunction(hashType), true, HASH_NO_LOCK);
SHashObj *hash = taosHashInit(64, taosGetDefaultHashFunction(hashType), true, HASH_ENTRY_LOCK);
if (hash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
......
......@@ -79,7 +79,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
SJson *pNodeRetentions = tjsonCreateArray();
tjsonAddItemToObject(pJson, "retentions", pNodeRetentions);
for (int32_t i = 0; i < nRetention; ++i) {
SJson *pNodeRetention = tjsonCreateObject();
SJson * pNodeRetention = tjsonCreateObject();
const SRetention *pRetention = pCfg->tsdbCfg.retentions + i;
tjsonAddIntegerToObject(pNodeRetention, "freq", pRetention->freq);
tjsonAddIntegerToObject(pNodeRetention, "freqUnit", pRetention->freqUnit);
......@@ -156,7 +156,7 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "keep2", pCfg->tsdbCfg.keep2, code);
if (code < 0) return -1;
SJson *pNodeRetentions = tjsonGetObjectItem(pJson, "retentions");
SJson * pNodeRetentions = tjsonGetObjectItem(pJson, "retentions");
int32_t nRetention = tjsonGetArraySize(pNodeRetentions);
if (nRetention > TSDB_RETENTION_MAX) {
nRetention = TSDB_RETENTION_MAX;
......
......@@ -27,10 +27,10 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg) {
SMetaReader mer1 = {0};
SMetaReader mer2 = {0};
char tableFName[TSDB_TABLE_FNAME_LEN];
SRpcMsg rpcMsg;
SRpcMsg rpcMsg = {0};
int32_t code = 0;
int32_t rspLen = 0;
void *pRsp = NULL;
void * pRsp = NULL;
SSchemaWrapper schema = {0};
SSchemaWrapper schemaTag = {0};
......@@ -111,6 +111,7 @@ _exit:
rpcMsg.pCont = pRsp;
rpcMsg.contLen = rspLen;
rpcMsg.code = code;
rpcMsg.msgType = pMsg->msgType;
if (code) {
qError("get table %s meta failed cause of %s", infoReq.tbName, tstrerror(code));
......@@ -130,10 +131,10 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg) {
SMetaReader mer1 = {0};
SMetaReader mer2 = {0};
char tableFName[TSDB_TABLE_FNAME_LEN];
SRpcMsg rpcMsg;
SRpcMsg rpcMsg = {0};
int32_t code = 0;
int32_t rspLen = 0;
void *pRsp = NULL;
void * pRsp = NULL;
SSchemaWrapper schema = {0};
SSchemaWrapper schemaTag = {0};
......@@ -220,6 +221,7 @@ _exit:
rpcMsg.pCont = pRsp;
rpcMsg.contLen = rspLen;
rpcMsg.code = code;
rpcMsg.msgType = pMsg->msgType;
if (code) {
qError("get table %s cfg failed cause of %s", cfgReq.tbName, tstrerror(code));
......
......@@ -51,6 +51,13 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define NEEDTO_COMPRESS_QUERY(size) ((size) > tsCompressColData ? 1 : 0)
#define START_TS_COLUMN_INDEX 0
#define END_TS_COLUMN_INDEX 1
#define UID_COLUMN_INDEX 2
#define GROUPID_COLUMN_INDEX UID_COLUMN_INDEX
#define DELETE_GROUPID_COLUMN_INDEX 2
enum {
// when this task starts to execute, this status will be set
TASK_NOT_COMPLETED = 0x1u,
......@@ -373,6 +380,8 @@ typedef struct SStreamBlockScanInfo {
int32_t scanWinIndex; // for state operator
int32_t pullDataResIndex;
SSDataBlock* pPullDataRes; // pull data SSDataBlock
SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
int32_t deleteDataIndex;
} SStreamBlockScanInfo;
typedef struct SSysTableScanInfo {
......@@ -438,6 +447,10 @@ typedef struct SIntervalAggOperatorInfo {
bool invertible;
SArray* pPrevValues; // SArray<SGroupKeys> used to keep the previous not null value for interpolation.
bool ignoreExpiredData;
SArray* pRecycledPages;
SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
} SIntervalAggOperatorInfo;
typedef struct SStreamFinalIntervalOperatorInfo {
......@@ -460,6 +473,10 @@ typedef struct SStreamFinalIntervalOperatorInfo {
int32_t pullIndex;
SSDataBlock* pPullDataRes;
bool ignoreExpiredData;
SArray* pRecycledPages;
SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
} SStreamFinalIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
......@@ -689,7 +706,7 @@ typedef struct SJoinOperatorInfo {
SSDataBlock *pRight;
int32_t rightPos;
SColumnInfo rightCol;
SNode *pOnCondition;
SNode *pCondAfterMerge;
} SJoinOperatorInfo;
#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
......
......@@ -53,13 +53,28 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
SNode* pOnCondition = pJoinNode->pOnConditions;
if (nodeType(pOnCondition) == QUERY_NODE_OPERATOR) {
SOperatorNode* pNode = (SOperatorNode*)pOnCondition;
SNode* pMergeCondition = pJoinNode->pMergeCondition;
if (nodeType(pMergeCondition) == QUERY_NODE_OPERATOR) {
SOperatorNode* pNode = (SOperatorNode*)pMergeCondition;
setJoinColumnInfo(&pInfo->leftCol, (SColumnNode*)pNode->pLeft);
setJoinColumnInfo(&pInfo->rightCol, (SColumnNode*)pNode->pRight);
} else if (nodeType(pOnCondition) == QUERY_NODE_LOGIC_CONDITION) {
extractTimeCondition(pInfo, (SLogicConditionNode*)pOnCondition);
} else {
ASSERT(false);
}
if (pJoinNode->pOnConditions != NULL && pJoinNode->node.pConditions != NULL) {
pInfo->pCondAfterMerge = nodesMakeNode(QUERY_NODE_LOGIC_CONDITION);
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pInfo->pCondAfterMerge);
pLogicCond->pParameterList = nodesMakeList();
nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->pOnConditions));
nodesListMakeAppend(&pLogicCond->pParameterList, nodesCloneNode(pJoinNode->node.pConditions));
pLogicCond->condType = LOGIC_COND_TYPE_AND;
} else if (pJoinNode->pOnConditions != NULL) {
pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->pOnConditions);
} else if (pJoinNode->node.pConditions != NULL) {
pInfo->pCondAfterMerge = nodesCloneNode(pJoinNode->node.pConditions);
} else {
pInfo->pCondAfterMerge = NULL;
}
pOperator->fpSet =
......@@ -88,15 +103,12 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
nodesDestroyNode(pJoinOperator->pCondAfterMerge);
}
SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
SSDataBlock* pRes = pJoinInfo->pRes;
blockDataCleanup(pRes);
blockDataEnsureCapacity(pRes, 4096);
int32_t nrows = 0;
while (1) {
......@@ -181,7 +193,28 @@ SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
break;
}
}
}
SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator) {
SJoinOperatorInfo* pJoinInfo = pOperator->info;
SSDataBlock* pRes = pJoinInfo->pRes;
blockDataCleanup(pRes);
blockDataEnsureCapacity(pRes, 4096);
while (true) {
int32_t numOfRowsBefore = pRes->info.rows;
doMergeJoinImpl(pOperator, pRes);
int32_t numOfNewRows = pRes->info.rows - numOfRowsBefore;
if (numOfNewRows == 0) {
break;
}
if (pJoinInfo->pCondAfterMerge != NULL) {
doFilter(pJoinInfo->pCondAfterMerge, pRes);
}
if (pRes->info.rows >= pOperator->resultInfo.threshold) {
break;
}
}
return (pRes->info.rows > 0) ? pRes : NULL;
}
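A hedged sketch of the produce-then-filter loop that the rewritten `doMergeJoin` follows: keep pulling merged rows, apply the post-merge condition, and stop once either no new rows arrive or one output block is full. The helpers below are toy stand-ins, not the executor's real API.
```c
#include <stdio.h>

static int produced = 0;

/* stand-in for doMergeJoinImpl: appends up to 3 rows per call, 10 rows in total */
static int produceRows(int *rows) {
  int added = 0;
  while (added < 3 && produced < 10) { (*rows)++; produced++; added++; }
  return added;
}

/* stand-in for doFilter with pCondAfterMerge: drops every other new row */
static void filterRows(int *rows, int newRows) { *rows -= newRows / 2; }

int main(void) {
  int rows = 0, threshold = 6;
  while (1) {
    int before = rows;
    if (produceRows(&rows) == 0) break;  /* no new rows: upstream exhausted */
    filterRows(&rows, rows - before);    /* post-merge filtering may shrink the batch */
    if (rows >= threshold) break;        /* enough rows for one result block */
  }
  printf("rows in result block: %d\n", rows);
  return 0;
}
```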
......
......@@ -799,18 +799,40 @@ static bool isStateWindow(SStreamBlockScanInfo* pInfo) {
return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
}
static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t));
if (groupId) {
return *groupId;
}
return 0;
/* Todo(liuyao) for partition by column
recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId);
int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals);
uint64_t resId = 0;
uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
if (groupId) {
return *groupId;
} else if (len != 0) {
resId = calcGroupId(pTableScanInfo->keyBuf, len);
taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t));
}
return resId;
*/
}
static void setGroupId(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
ASSERT(rowIndex < pBlock->info.rows);
switch (pBlock->info.type) {
case STREAM_RETRIEVE: {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
pInfo->groupId = groupCol[rowIndex];
} break;
case STREAM_DELETE_DATA:
break;
default:
break;
switch (pBlock->info.type) {
case STREAM_DELETE_DATA:
case STREAM_RETRIEVE: {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
pInfo->groupId = groupCol[rowIndex];
}
break;
default:
break;
}
}
......@@ -830,14 +852,14 @@ static bool prepareDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int3
int64_t gap = pInfo->sessionSup.gap;
int32_t winIndex = 0;
SResultWindowInfo* pCurWin =
getSessionTimeWindow(pAggSup, tsCols[(*pRowIndex)], INT64_MIN, pSDB->info.groupId, gap, &winIndex);
getSessionTimeWindow(pAggSup, tsCols[*pRowIndex], INT64_MIN, pSDB->info.groupId, gap, &winIndex);
win = pCurWin->win;
(*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, (*pRowIndex), gap, NULL);
(*pRowIndex) += updateSessionWindowInfo(pCurWin, tsCols, NULL, pSDB->info.rows, *pRowIndex, gap, NULL);
} else {
win =
getActiveTimeWindow(NULL, &dumyInfo, tsCols[(*pRowIndex)], &pInfo->interval, pInfo->interval.precision, NULL);
setGroupId(pInfo, pSDB, 2, *pRowIndex);
(*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, (*pRowIndex), win.ekey, binarySearchForKey, NULL,
getActiveTimeWindow(NULL, &dumyInfo, tsCols[*pRowIndex], &pInfo->interval, pInfo->interval.precision, NULL);
setGroupId(pInfo, pSDB, GROUPID_COLUMN_INDEX, *pRowIndex);
(*pRowIndex) += getNumOfRowsInTimeWindow(&pSDB->info, tsCols, *pRowIndex, win.ekey, binarySearchForKey, NULL,
TSDB_ORDER_ASC);
}
needRead = true;
......@@ -881,27 +903,6 @@ static void copyOneRow(SSDataBlock* dest, SSDataBlock* source, int32_t sourceRow
dest->info.rows++;
}
static uint64_t getGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t rowId) {
uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupId) {
return *groupId;
}
return 0;
/* Todo(liuyao) for partition by column
recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId);
int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals);
uint64_t resId = 0;
uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
if (groupId) {
return *groupId;
} else if (len != 0) {
resId = calcGroupId(pTableScanInfo->keyBuf, len);
taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t));
}
return resId;
*/
}
static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, int32_t tsColIndex, int32_t* pRowIndex) {
while (1) {
SSDataBlock* pResult = NULL;
......@@ -925,7 +926,7 @@ static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, i
SSDataBlock* pBlock = createOneDataBlock(pResult, true);
blockDataCleanup(pResult);
for (int32_t i = 0; i < pBlock->info.rows; i++) {
uint64_t id = getGroupId(pInfo->pOperatorDumy, pBlock, i);
uint64_t id = getGroupId(pInfo->pOperatorDumy, pBlock->info.uid);
if (id == pInfo->groupId) {
copyOneRow(pResult, pBlock, i);
}
......@@ -934,6 +935,40 @@ static SSDataBlock* doDataScan(SStreamBlockScanInfo* pInfo, SSDataBlock* pSDB, i
*/
}
static void copyDeleteDataBlock(SStreamBlockScanInfo* pInfo, SSDataBlock* pDelBlock, SOperatorInfo* pOperator, SSDataBlock* pUpdateRes) {
if (pDelBlock->info.rows == 0) {
return;
}
blockDataCleanup(pUpdateRes);
blockDataEnsureCapacity(pUpdateRes, 64);
ASSERT(taosArrayGetSize(pDelBlock->pDataBlock) >= 3);
SColumnInfoData* pStartTsCol = taosArrayGet(pDelBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* startData = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pDelBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
SColumnInfoData* pGpCol = taosArrayGet(pDelBlock->pDataBlock, UID_COLUMN_INDEX);
uint64_t* uidCol = (uint64_t*)pGpCol->pData;
SColumnInfoData* pDestTsCol = taosArrayGet(pUpdateRes->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDestGpCol = taosArrayGet(pUpdateRes->pDataBlock, DELETE_GROUPID_COLUMN_INDEX);
for (int32_t i = pInfo->deleteDataIndex; i < pDelBlock->info.rows &&
    i < pDelBlock->info.capacity - (endData[i] - startData[i]) / pInfo->interval.interval - 1; i++) {
uint64_t groupId = getGroupId(pOperator, uidCol[i]);
for (TSKEY startTs = startData[i]; startTs <= endData[i]; ) {
colDataAppend(pDestTsCol, pUpdateRes->info.rows, (const char*)&startTs, false);
colDataAppend(pDestGpCol, pUpdateRes->info.rows, (const char*)&groupId, false);
pUpdateRes->info.rows++;
startTs = taosTimeAdd(startTs, pInfo->interval.interval, pInfo->interval.intervalUnit, pInfo->interval.precision);
}
pInfo->deleteDataIndex++;
}
if (pInfo->deleteDataIndex > 0 && pInfo->deleteDataIndex == pDelBlock->info.rows) {
blockDataCleanup(pDelBlock);
pInfo->deleteDataIndex = 0;
}
}
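A small sketch (an assumed simplification, not the real tsdb API) of the expansion `copyDeleteDataBlock` performs: each deleted range [startTs, endTs] is unrolled into one row per interval slot, which is what the inner `taosTimeAdd` loop above does.
```c
#include <stdint.h>
#include <stdio.h>

/* toy version of the inner loop: fixed-width interval instead of taosTimeAdd */
static void expandDeleteRange(int64_t startTs, int64_t endTs, int64_t interval) {
  for (int64_t ts = startTs; ts <= endTs; ts += interval) {
    printf("emit delete marker at ts=%lld\n", (long long)ts);
  }
}

int main(void) {
  /* a delete over [0, 30] with a 10-unit interval yields four rows: 0, 10, 20, 30 */
  expandDeleteRange(0, 30, 10);
  return 0;
}
```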
static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDataBlock* pUpdateBlock) {
blockDataCleanup(pUpdateBlock);
int32_t size = taosArrayGetSize(pInfo->tsArray);
......@@ -943,11 +978,11 @@ static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDa
blockDataEnsureCapacity(pUpdateBlock, size);
int32_t rowId = *(int32_t*)taosArrayGet(pInfo->tsArray, pInfo->tsArrayIndex);
pInfo->groupId = getGroupId(pInfo->pSnapshotReadOp, pBlock, rowId);
pInfo->groupId = getGroupId(pInfo->pSnapshotReadOp, pBlock->info.uid);
int32_t i = 0;
for (; i < size; i++) {
rowId = *(int32_t*)taosArrayGet(pInfo->tsArray, i + pInfo->tsArrayIndex);
uint64_t id = getGroupId(pInfo->pSnapshotReadOp, pBlock, rowId);
uint64_t id = getGroupId(pInfo->pSnapshotReadOp, pBlock->info.uid);
if (pInfo->groupId != id) {
break;
}
......@@ -964,28 +999,32 @@ static void setUpdateData(SStreamBlockScanInfo* pInfo, SSDataBlock* pBlock, SSDa
if (size > 0 && pInfo->tsArrayIndex == size) {
taosArrayClear(pInfo->tsArray);
}
if (size == 0) {
copyDeleteDataBlock(pInfo, pInfo->pDeleteDataRes, pInfo->pSnapshotReadOp, pUpdateBlock);
}
}
static void getUpdateDataBlock(SStreamBlockScanInfo* pInfo, bool invertible, SSDataBlock* pBlock,
SSDataBlock* pUpdateBlock) {
static void checkUpdateData(SStreamBlockScanInfo* pInfo, bool invertible, SSDataBlock* pBlock,
bool out) {
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* ts = (TSKEY*)pColDataInfo->pData;
for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
if (updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, ts[rowId])) {
if (updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, ts[rowId]) && out) {
taosArrayPush(pInfo->tsArray, &rowId);
}
}
if (!pUpdateBlock) {
taosArrayClear(pInfo->tsArray);
return;
}
static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t uidColIndex) {
ASSERT(taosArrayGetSize(pBlock->pDataBlock) >= 3);
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, uidColIndex);
uint64_t* uidCol = (uint64_t*)pColDataInfo->pData;
ASSERT(pBlock->info.rows > 0);
for (int32_t i = 0 ; i < pBlock->info.rows; i++) {
uidCol[i] = getGroupId(pOperator, uidCol[i]);
}
setUpdateData(pInfo, pBlock, pUpdateBlock);
// Todo(liuyao) get from tsdb
// SSDataBlock* p = createOneDataBlock(pBlock, true);
// p->info.type = STREAM_INVERT;
// taosArrayClear(pInfo->tsArray);
// return p;
}
static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
......@@ -1010,13 +1049,29 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
int32_t current = pInfo->validBlockIndex++;
SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
blockDataUpdateTsWindow(pBlock, 0);
if (pBlock->info.type == STREAM_RETRIEVE) {
switch (pBlock->info.type) {
case STREAM_RETRIEVE:{
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RETRIEVE;
copyDataBlock(pInfo->pPullDataRes, pBlock);
pInfo->pullDataResIndex = 0;
prepareDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex);
prepareDataScan(pInfo, pInfo->pPullDataRes, START_TS_COLUMN_INDEX, &pInfo->pullDataResIndex);
updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
}
break;
case STREAM_DELETE_DATA: {
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
copyDataBlock(pInfo->pDeleteDataRes, pBlock);
copyDeleteDataBlock(pInfo, pInfo->pDeleteDataRes, pInfo->pSnapshotReadOp, pInfo->pUpdateRes);
pInfo->updateResIndex = 0;
prepareDataScan(pInfo, pInfo->pUpdateRes, START_TS_COLUMN_INDEX, &pInfo->updateResIndex);
pInfo->pUpdateRes->info.type = STREAM_DELETE_DATA;
return pInfo->pUpdateRes;
}
break;
default:
break;
}
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
......@@ -1033,39 +1088,33 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RETRIEVE) {
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pPullDataRes, 0, &pInfo->pullDataResIndex);
if (pSDB != NULL) {
getUpdateDataBlock(pInfo, true, pSDB, NULL);
checkUpdateData(pInfo, true, pSDB, false);
pSDB->info.type = STREAM_PULL_DATA;
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
} else {
if (isStateWindow(pInfo)) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
pInfo->updateResIndex = pInfo->pUpdateRes->info.rows;
if (!prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex)) {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
}
} else if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) {
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB) {
pSDB->info.type = STREAM_NORMAL;
checkUpdateData(pInfo, true, pSDB, false);
return pSDB;
}
if (pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER) {
SSDataBlock* pSDB = doDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
if (pSDB == NULL) {
setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
if (pInfo->pUpdateRes->info.rows > 0) {
if (!isStateWindow(pInfo)) {
// Todo(liuyao) maybe this can be deleted.
bool test = prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
ASSERT(test == false);
}
return pInfo->pUpdateRes;
} else {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
}
} else {
pSDB->info.type = STREAM_NORMAL;
getUpdateDataBlock(pInfo, true, pSDB, NULL);
return pSDB;
}
setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
if (pInfo->pUpdateRes->info.rows > 0) {
prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex);
return pInfo->pUpdateRes;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
} else if (isStateWindow(pInfo)) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER;
pInfo->updateResIndex = pInfo->pUpdateRes->info.rows;
if (prepareDataScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex)) {
ASSERT(pInfo->pUpdateRes->info.rows == 0);
// return empty data block
return pInfo->pUpdateRes;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
}
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
......@@ -1159,7 +1208,8 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
pOperator->status = OP_EXEC_DONE;
} else if (pInfo->pUpdateInfo) {
pInfo->tsArrayIndex = 0;
getUpdateDataBlock(pInfo, true, pInfo->pRes, pInfo->pUpdateRes);
checkUpdateData(pInfo, true, pInfo->pRes, true);
setUpdateData(pInfo, pInfo->pRes, pInfo->pUpdateRes);
if (pInfo->pUpdateRes->info.rows > 0) {
if (pInfo->pUpdateRes->info.type == STREAM_CLEAR) {
pInfo->updateResIndex = 0;
......@@ -1170,9 +1220,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
}
}
return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SCAN) {
// check reader last status
// if not match, reset status
......@@ -1285,6 +1333,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->groupId = 0;
pInfo->pPullDataRes = createPullDataBlock();
pInfo->pStreamScanOp = pOperator;
pInfo->deleteDataIndex = 0;
pInfo->pDeleteDataRes = createPullDataBlock();
pOperator->name = "StreamBlockScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
......
......@@ -15,10 +15,10 @@
#include "builtins.h"
#include "builtinsimpl.h"
#include "cJSON.h"
#include "querynodes.h"
#include "scalar.h"
#include "taoserror.h"
#include "cJSON.h"
static int32_t buildFuncErrMsg(char* pErrBuf, int32_t len, int32_t errCode, const char* pFormat, ...) {
va_list vArgList;
......@@ -815,13 +815,13 @@ static int8_t validateHistogramBinType(char* binTypeStr) {
}
static bool validateHistogramBinDesc(char* binDescStr, int8_t binType, char* errMsg, int32_t msgLen) {
const char *msg1 = "HISTOGRAM function requires four parameters";
const char *msg3 = "HISTOGRAM function invalid format for binDesc parameter";
const char *msg4 = "HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]";
const char *msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]";
const char *msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0";
const char *msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type";
const char *msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1";
const char* msg1 = "HISTOGRAM function requires four parameters";
const char* msg3 = "HISTOGRAM function invalid format for binDesc parameter";
const char* msg4 = "HISTOGRAM function binDesc parameter \"count\" should be in range [1, 1000]";
const char* msg5 = "HISTOGRAM function bin/parameter should be in range [-DBL_MAX, DBL_MAX]";
const char* msg6 = "HISTOGRAM function binDesc parameter \"width\" cannot be 0";
const char* msg7 = "HISTOGRAM function binDesc parameter \"start\" cannot be 0 with \"log_bin\" type";
const char* msg8 = "HISTOGRAM function binDesc parameter \"factor\" cannot be negative or equal to 0/1";
cJSON* binDesc = cJSON_Parse(binDescStr);
int32_t numOfBins;
......@@ -1433,10 +1433,6 @@ static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t l
static int32_t translateFirstLastImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isPartial) {
// first(col_list) will be rewritten as first(col)
if (2 != LIST_LENGTH(pFunc->pParameterList)) { // input has two params c0,ts, is this a bug?
return TSDB_CODE_SUCCESS;
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
int32_t paraBytes = ((SExprNode*)pPara)->resType.bytes;
......
......@@ -719,8 +719,10 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) {
ASSERT(numOfElem >= 0);
pAvgRes->count += numOfElem;
if (IS_INTEGER_TYPE(type)) {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
pAvgRes->sum.isum += pAgg->sum;
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
pAvgRes->sum.usum += pAgg->sum;
} else if (IS_FLOAT_TYPE(type)) {
pAvgRes->sum.dsum += GET_DOUBLE_VAL((const char*)&(pAgg->sum));
}
......@@ -784,6 +786,64 @@ int32_t avgFunction(SqlFunctionCtx* pCtx) {
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
uint8_t* plist = (uint8_t*)pCol->pData;
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) {
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) {
continue;
}
numOfElem += 1;
pAvgRes->count += 1;
pAvgRes->sum.usum += plist[i];
}
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
uint16_t* plist = (uint16_t*)pCol->pData;
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) {
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) {
continue;
}
numOfElem += 1;
pAvgRes->count += 1;
pAvgRes->sum.usum += plist[i];
}
break;
}
case TSDB_DATA_TYPE_UINT: {
uint32_t* plist = (uint32_t*)pCol->pData;
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) {
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) {
continue;
}
numOfElem += 1;
pAvgRes->count += 1;
pAvgRes->sum.usum += plist[i];
}
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
uint64_t* plist = (uint64_t*)pCol->pData;
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) {
if (pCol->hasNull && colDataIsNull_f(pCol->nullbitmap, i)) {
continue;
}
numOfElem += 1;
pAvgRes->count += 1;
pAvgRes->sum.usum += plist[i];
}
break;
}
case TSDB_DATA_TYPE_FLOAT: {
float* plist = (float*)pCol->pData;
for (int32_t i = start; i < numOfRows + pInput->startRowIndex; ++i) {
......@@ -825,8 +885,10 @@ _avg_over:
static void avgTransferInfo(SAvgRes* pInput, SAvgRes* pOutput) {
pOutput->type = pInput->type;
if (IS_INTEGER_TYPE(pOutput->type)) {
if (IS_SIGNED_NUMERIC_TYPE(pOutput->type)) {
pOutput->sum.isum += pInput->sum.isum;
} else if (IS_UNSIGNED_NUMERIC_TYPE(pOutput->type)) {
pOutput->sum.usum += pInput->sum.usum;
} else {
pOutput->sum.dsum += pInput->sum.dsum;
}
......@@ -900,6 +962,22 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
LIST_AVG_N(pAvgRes->sum.isum, int64_t);
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
LIST_AVG_N(pAvgRes->sum.usum, uint8_t);
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
LIST_AVG_N(pAvgRes->sum.usum, uint16_t);
break;
}
case TSDB_DATA_TYPE_UINT: {
LIST_AVG_N(pAvgRes->sum.usum, uint32_t);
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
LIST_AVG_N(pAvgRes->sum.usum, uint64_t);
break;
}
case TSDB_DATA_TYPE_FLOAT: {
LIST_AVG_N(pAvgRes->sum.dsum, float);
break;
......@@ -925,8 +1003,10 @@ int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SAvgRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
if (IS_INTEGER_TYPE(type)) {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
pDBuf->sum.isum += pSBuf->sum.isum;
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
pDBuf->sum.usum += pSBuf->sum.usum;
} else {
pDBuf->sum.dsum += pSBuf->sum.dsum;
}
......@@ -941,8 +1021,10 @@ int32_t avgFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SAvgRes* pAvgRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t type = pAvgRes->type;
if (IS_INTEGER_TYPE(type)) {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.isum / ((double)pAvgRes->count);
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
pAvgRes->result = pAvgRes->sum.usum / ((double)pAvgRes->count);
} else {
pAvgRes->result = pAvgRes->sum.dsum / ((double)pAvgRes->count);
}
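A self-contained sketch of why the unsigned branch added above matters: accumulating unsigned values into the signed field of the union can overflow, so avg keeps a separate `usum` path. `MiniAvgRes` is a simplified stand-in for `SAvgRes`, not the real struct.
```c
#include <stdint.h>
#include <stdio.h>

typedef struct {
  union {
    int64_t  isum;  /* signed integer columns */
    uint64_t usum;  /* unsigned integer columns */
    double   dsum;  /* float/double columns */
  } sum;
  int64_t count;
  double  result;
} MiniAvgRes;

int main(void) {
  MiniAvgRes r = {0};
  uint64_t vals[] = {UINT64_MAX / 2, UINT64_MAX / 2};  /* sum fits uint64_t but not int64_t */
  for (int i = 0; i < 2; ++i) {
    r.sum.usum += vals[i];
    r.count++;
  }
  r.result = (double)r.sum.usum / (double)r.count;  /* mirrors the usum branch of avgFinalize */
  printf("avg = %f\n", r.result);
  return 0;
}
```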
......
......@@ -1565,6 +1565,10 @@ void constructUdfService(void *argsThread) {
//TODO return value of uv_run
uv_run(&udfc->uvLoop, UV_RUN_DEFAULT);
uv_loop_close(&udfc->uvLoop);
uv_walk(&udfc->uvLoop, udfUdfdCloseWalkCb, NULL);
uv_run(&udfc->uvLoop, UV_RUN_DEFAULT);
uv_loop_close(&udfc->uvLoop);
}
int32_t udfcOpen() {
......
......@@ -368,6 +368,7 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
COPY_SCALAR_FIELD(joinType);
CLONE_NODE_FIELD(pMergeCondition);
CLONE_NODE_FIELD(pOnConditions);
COPY_SCALAR_FIELD(isSingleTableJoin);
return TSDB_CODE_SUCCESS;
......
......@@ -1254,6 +1254,7 @@ static int32_t jsonToLogicPlan(const SJson* pJson, void* pObj) {
static const char* jkJoinLogicPlanJoinType = "JoinType";
static const char* jkJoinLogicPlanOnConditions = "OnConditions";
static const char* jkJoinLogicPlanMergeCondition = "MergeConditions";
static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) {
const SJoinLogicNode* pNode = (const SJoinLogicNode*)pObj;
......@@ -1262,6 +1263,9 @@ static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinLogicPlanJoinType, pNode->joinType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinLogicPlanMergeCondition, nodeToJson, pNode->pMergeCondition);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinLogicPlanOnConditions, nodeToJson, pNode->pOnConditions);
}
......@@ -1617,6 +1621,7 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) {
}
static const char* jkJoinPhysiPlanJoinType = "JoinType";
static const char* jkJoinPhysiPlanMergeCondition = "MergeCondition";
static const char* jkJoinPhysiPlanOnConditions = "OnConditions";
static const char* jkJoinPhysiPlanTargets = "Targets";
......@@ -1627,6 +1632,9 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkJoinPhysiPlanJoinType, pNode->joinType);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinPhysiPlanMergeCondition, nodeToJson, pNode->pMergeCondition);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinPhysiPlanOnConditions, nodeToJson, pNode->pOnConditions);
}
......@@ -1648,6 +1656,9 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanOnConditions, &pNode->pOnConditions);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinPhysiPlanMergeCondition, &pNode->pMergeCondition);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkJoinPhysiPlanTargets, &pNode->pTargets);
}
......
......@@ -470,6 +470,9 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode* pJoin = (SJoinPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
res = walkPhysiPlan(pJoin->pMergeCondition, order, walker, pContext);
}
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
res = walkPhysiPlan(pJoin->pOnConditions, order, walker, pContext);
}
......
......@@ -722,6 +722,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_LOGIC_PLAN_JOIN: {
SJoinLogicNode* pLogicNode = (SJoinLogicNode*)pNode;
destroyLogicNode((SLogicNode*)pLogicNode);
nodesDestroyNode(pLogicNode->pMergeCondition);
nodesDestroyNode(pLogicNode->pOnConditions);
break;
}
......@@ -832,6 +833,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN: {
SJoinPhysiNode* pPhyNode = (SJoinPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyNode(pPhyNode->pMergeCondition);
nodesDestroyNode(pPhyNode->pOnConditions);
nodesDestroyList(pPhyNode->pTargets);
break;
......@@ -1497,6 +1499,38 @@ int32_t nodesCollectColumns(SSelectStmt* pSelect, ESqlClause clause, const char*
return TSDB_CODE_SUCCESS;
}
int32_t nodesCollectColumnsFromNode(SNode* node, const char* pTableAlias, ECollectColType type, SNodeList** pCols) {
if (NULL == pCols) {
return TSDB_CODE_FAILED;
}
SCollectColumnsCxt cxt = {
.errCode = TSDB_CODE_SUCCESS,
.pTableAlias = pTableAlias,
.collectType = type,
.pCols = (NULL == *pCols ? nodesMakeList() : *pCols),
.pColHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK)};
if (NULL == cxt.pCols || NULL == cxt.pColHash) {
return TSDB_CODE_OUT_OF_MEMORY;
}
*pCols = NULL;
nodesWalkExpr(node, collectColumns, &cxt);
taosHashCleanup(cxt.pColHash);
if (TSDB_CODE_SUCCESS != cxt.errCode) {
nodesDestroyList(cxt.pCols);
return cxt.errCode;
}
if (LIST_LENGTH(cxt.pCols) > 0) {
*pCols = cxt.pCols;
} else {
nodesDestroyList(cxt.pCols);
}
return TSDB_CODE_SUCCESS;
}
typedef struct SCollectFuncsCxt {
int32_t errCode;
FFuncClassifier classifier;
......
......@@ -385,6 +385,15 @@ SNode* createLogicConditionNode(SAstCreateContext* pCxt, ELogicConditionType typ
SNode* createOperatorNode(SAstCreateContext* pCxt, EOperatorType type, SNode* pLeft, SNode* pRight) {
CHECK_PARSER_STATUS(pCxt);
if (OP_TYPE_MINUS == type && QUERY_NODE_VALUE == nodeType(pLeft)) {
SValueNode* pVal = (SValueNode*)pLeft;
char* pNewLiteral = taosMemoryCalloc(1, strlen(pVal->literal) + 2);
CHECK_OUT_OF_MEM(pNewLiteral);
sprintf(pNewLiteral, "-%s", pVal->literal);
taosMemoryFree(pVal->literal);
pVal->literal = pNewLiteral;
return pLeft;
}
SOperatorNode* op = (SOperatorNode*)nodesMakeNode(QUERY_NODE_OPERATOR);
CHECK_OUT_OF_MEM(op);
op->opType = type;
......
......@@ -558,11 +558,11 @@ static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColum
pCol->node.resType = pExpr->resType;
}
static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, SNodeList* pList) {
static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, bool igTags, SNodeList* pList) {
if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) {
const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta;
int32_t nums =
pMeta->tableInfo.numOfColumns + ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0);
int32_t nums = pMeta->tableInfo.numOfColumns +
(igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0));
for (int32_t i = 0; i < nums; ++i) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
......@@ -878,6 +878,9 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal,
}
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY: {
if (strict && (pVal->node.resType.bytes > targetDt.bytes - VARSTR_HEADER_SIZE)) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal);
}
pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1);
if (NULL == pVal->datum.p) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY);
......@@ -1333,6 +1336,9 @@ static int32_t rewriteSystemInfoFuncImpl(STranslateContext* pCxt, char* pLiteral
pVal->isNull = true;
} else {
pVal->literal = pLiteral;
if (IS_VAR_DATA_TYPE(pVal->node.resType.type)) {
pVal->node.resType.bytes = strlen(pLiteral);
}
}
if (DEAL_RES_ERROR != translateValue(pCxt, pVal)) {
*pNode = (SNode*)pVal;
......@@ -1928,7 +1934,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
return code;
}
static int32_t createAllColumns(STranslateContext* pCxt, SNodeList** pCols) {
static int32_t createAllColumns(STranslateContext* pCxt, bool igTags, SNodeList** pCols) {
*pCols = nodesMakeList();
if (NULL == *pCols) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY);
......@@ -1937,7 +1943,7 @@ static int32_t createAllColumns(STranslateContext* pCxt, SNodeList** pCols) {
size_t nums = taosArrayGetSize(pTables);
for (size_t i = 0; i < nums; ++i) {
STableNode* pTable = taosArrayGetP(pTables, i);
int32_t code = createColumnsByTable(pCxt, pTable, *pCols);
int32_t code = createColumnsByTable(pCxt, pTable, igTags, *pCols);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
......@@ -1974,7 +1980,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
return (SNode*)pFunc;
}
static int32_t createTableAllCols(STranslateContext* pCxt, SColumnNode* pCol, SNodeList** pOutput) {
static int32_t createTableAllCols(STranslateContext* pCxt, SColumnNode* pCol, bool igTags, SNodeList** pOutput) {
STableNode* pTable = NULL;
int32_t code = findTable(pCxt, pCol->tableAlias, &pTable);
if (TSDB_CODE_SUCCESS == code && NULL == *pOutput) {
......@@ -1984,7 +1990,7 @@ static int32_t createTableAllCols(STranslateContext* pCxt, SColumnNode* pCol, SN
}
}
if (TSDB_CODE_SUCCESS == code) {
code = createColumnsByTable(pCxt, pTable, *pOutput);
code = createColumnsByTable(pCxt, pTable, igTags, *pOutput);
}
return code;
}
......@@ -2006,11 +2012,9 @@ static int32_t createMultiResFuncsParas(STranslateContext* pCxt, SNodeList* pSrc
SNode* pPara = NULL;
FOREACH(pPara, pSrcParas) {
if (isStar(pPara)) {
code = createAllColumns(pCxt, &pExprs);
// The syntax definition ensures that * and other parameters do not appear at the same time
break;
code = createAllColumns(pCxt, true, &pExprs);
} else if (isTableStar(pPara)) {
code = createTableAllCols(pCxt, (SColumnNode*)pPara, &pExprs);
code = createTableAllCols(pCxt, (SColumnNode*)pPara, true, &pExprs);
} else {
code = nodesListMakeStrictAppend(&pExprs, nodesCloneNode(pPara));
}
......@@ -2069,7 +2073,7 @@ static int32_t translateStar(STranslateContext* pCxt, SSelectStmt* pSelect) {
int32_t code = TSDB_CODE_SUCCESS;
if (isStar(pNode)) {
SNodeList* pCols = NULL;
code = createAllColumns(pCxt, &pCols);
code = createAllColumns(pCxt, false, &pCols);
if (TSDB_CODE_SUCCESS == code) {
INSERT_LIST(pSelect->pProjectionList, pCols);
ERASE_NODE(pSelect->pProjectionList);
......@@ -2085,7 +2089,7 @@ static int32_t translateStar(STranslateContext* pCxt, SSelectStmt* pSelect) {
}
} else if (isTableStar(pNode)) {
SNodeList* pCols = NULL;
code = createTableAllCols(pCxt, (SColumnNode*)pNode, &pCols);
code = createTableAllCols(pCxt, (SColumnNode*)pNode, false, &pCols);
if (TSDB_CODE_SUCCESS == code) {
INSERT_LIST(pSelect->pProjectionList, pCols);
ERASE_NODE(pSelect->pProjectionList);
......@@ -5826,10 +5830,6 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
return pCxt->errCode;
}
if (IS_VAR_DATA_TYPE(pSchema->type) && strlen(pStmt->pVal->literal) > pSchema->bytes) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pStmt->pVal->literal);
}
pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type);
if (targetDt.type == TSDB_DATA_TYPE_JSON) {
pReq->isNull = 0;
......
......@@ -236,7 +236,6 @@ int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char*
const char* prefix = "syntax error";
if (sourceStr == NULL) {
assert(additionalInfo != NULL);
snprintf(pBuf->buf, pBuf->len, msgFormat1, additionalInfo);
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
......@@ -254,40 +253,25 @@ int32_t buildSyntaxErrMsg(SMsgBuf* pBuf, const char* additionalInfo, const char*
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
SSchema* getTableColumnSchema(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
return (SSchema*)pTableMeta->schema;
}
SSchema* getTableColumnSchema(const STableMeta* pTableMeta) { return (SSchema*)pTableMeta->schema; }
static SSchema* getOneColumnSchema(const STableMeta* pTableMeta, int32_t colIndex) {
assert(pTableMeta != NULL && pTableMeta->schema != NULL && colIndex >= 0 &&
colIndex < (getNumOfColumns(pTableMeta) + getNumOfTags(pTableMeta)));
SSchema* pSchema = (SSchema*)pTableMeta->schema;
return &pSchema[colIndex];
}
SSchema* getTableTagSchema(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL &&
(pTableMeta->tableType == TSDB_SUPER_TABLE || pTableMeta->tableType == TSDB_CHILD_TABLE));
return getOneColumnSchema(pTableMeta, getTableInfo(pTableMeta).numOfColumns);
}
int32_t getNumOfColumns(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
// table created according to super table, use data from super table
return getTableInfo(pTableMeta).numOfColumns;
}
int32_t getNumOfTags(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
return getTableInfo(pTableMeta).numOfTags;
}
int32_t getNumOfTags(const STableMeta* pTableMeta) { return getTableInfo(pTableMeta).numOfTags; }
STableComInfo getTableInfo(const STableMeta* pTableMeta) {
assert(pTableMeta != NULL);
return pTableMeta->tableInfo;
}
STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; }
STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
size_t size = TABLE_META_SIZE(pTableMeta);
......
......@@ -75,6 +75,11 @@ static SLogicNode* optFindPossibleNode(SLogicNode* pNode, FMayBeOptimized func)
return NULL;
}
static void optResetParent(SLogicNode* pNode) {
SNode* pChild = NULL;
FOREACH(pChild, pNode->pChildren) { ((SLogicNode*)pChild)->pParent = pNode; }
}
EDealRes scanPathOptHaveNormalColImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
// *((bool*)pContext) = (COLUMN_TYPE_TAG != ((SColumnNode*)pNode)->colType);
......@@ -480,12 +485,18 @@ static int32_t pushDownCondOptPushCondToProject(SOptimizeContext* pCxt, SProject
return pushDownCondOptAppendCond(&pProject->node.pConditions, pCond);
}
static int32_t pushDownCondOptPushCondToJoin(SOptimizeContext* pCxt, SJoinLogicNode * pJoin, SNode** pCond) {
return pushDownCondOptAppendCond(&pJoin->node.pConditions, pCond);
}
static int32_t pushDownCondOptPushCondToChild(SOptimizeContext* pCxt, SLogicNode* pChild, SNode** pCond) {
switch (nodeType(pChild)) {
case QUERY_NODE_LOGIC_PLAN_SCAN:
return pushDownCondOptPushCondToScan(pCxt, (SScanLogicNode*)pChild, pCond);
case QUERY_NODE_LOGIC_PLAN_PROJECT:
return pushDownCondOptPushCondToProject(pCxt, (SProjectLogicNode*)pChild, pCond);
case QUERY_NODE_LOGIC_PLAN_JOIN:
return pushDownCondOptPushCondToJoin(pCxt, (SJoinLogicNode*)pChild, pCond);
default:
break;
}
......@@ -554,13 +565,83 @@ static int32_t pushDownCondOptCheckJoinOnCond(SOptimizeContext* pCxt, SJoinLogic
return TSDB_CODE_SUCCESS;
}
static int32_t pushDownCondOptPartJoinOnCondLogicCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) {
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pJoin->pOnConditions);
int32_t code = TSDB_CODE_SUCCESS;
SNodeList* pOnConds = NULL;
SNode* pCond = NULL;
FOREACH(pCond, pLogicCond->pParameterList) {
if (pushDownCondOptIsPriKeyEqualCond(pJoin, pCond)) {
*ppMergeCond = nodesCloneNode(pCond);
} else {
code = nodesListMakeAppend(&pOnConds, nodesCloneNode(pCond));
}
}
SNode* pTempOnCond = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = nodesMergeConds(&pTempOnCond, &pOnConds);
}
if (TSDB_CODE_SUCCESS == code && NULL != *ppMergeCond) {
*ppOnCond = pTempOnCond;
nodesDestroyNode(pJoin->pOnConditions);
pJoin->pOnConditions = NULL;
return TSDB_CODE_SUCCESS;
} else {
nodesDestroyList(pOnConds);
nodesDestroyNode(pTempOnCond);
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
}
static int32_t pushDownCondOptPartJoinOnCond(SJoinLogicNode* pJoin, SNode** ppMergeCond, SNode** ppOnCond) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pOnConditions) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pOnConditions))->condType) {
return pushDownCondOptPartJoinOnCondLogicCond(pJoin, ppMergeCond, ppOnCond);
}
if (pushDownCondOptIsPriKeyEqualCond(pJoin, pJoin->pOnConditions)) {
*ppMergeCond = nodesCloneNode(pJoin->pOnConditions);
*ppOnCond = NULL;
nodesDestroyNode(pJoin->pOnConditions);
pJoin->pOnConditions = NULL;
return TSDB_CODE_SUCCESS;
} else {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
}
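An illustrative sketch (toy strings, not the planner's node types) of the split performed above: out of the AND'ed ON conditions, the primary-key equality becomes the merge condition and the rest stay behind as post-merge ON conditions.
```c
#include <stdio.h>
#include <string.h>

int main(void) {
  const char *conds[]   = {"t1.ts = t2.ts", "t1.c1 > 10", "t2.c2 < 5"};
  const char *mergeCond = NULL;

  printf("conditions kept for post-merge filtering:\n");
  for (int i = 0; i < 3; ++i) {
    /* stand-in for pushDownCondOptIsPriKeyEqualCond: treat the ts equality as the merge key */
    if (mergeCond == NULL && strstr(conds[i], ".ts = ") != NULL) {
      mergeCond = conds[i];
    } else {
      printf("  %s\n", conds[i]);
    }
  }
  printf("merge condition: %s\n", mergeCond ? mergeCond : "(none)");
  return 0;
}
```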
static int32_t pushDownCondOptJoinExtractMergeCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
int32_t code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin);
SNode* pJoinMergeCond = NULL;
SNode* pJoinOnCond = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = pushDownCondOptPartJoinOnCond(pJoin, &pJoinMergeCond, &pJoinOnCond);
}
if (TSDB_CODE_SUCCESS == code) {
pJoin->pMergeCondition = pJoinMergeCond;
pJoin->pOnConditions = pJoinOnCond;
} else {
nodesDestroyNode(pJoinMergeCond);
nodesDestroyNode(pJoinOnCond);
}
return code;
}
static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) {
return TSDB_CODE_SUCCESS;
}
if (NULL == pJoin->node.pConditions) {
return pushDownCondOptCheckJoinOnCond(pCxt, pJoin);
int32_t code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin);
if (TSDB_CODE_SUCCESS == code) {
OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE);
pCxt->optimized = true;
}
return code;
}
SNode* pOnCond = NULL;
......@@ -579,10 +660,13 @@ static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* p
pushDownCondOptPushCondToChild(pCxt, (SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1), &pRightChildCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin);
}
if (TSDB_CODE_SUCCESS == code) {
OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE);
pCxt->optimized = true;
code = pushDownCondOptCheckJoinOnCond(pCxt, pJoin);
} else {
nodesDestroyNode(pOnCond);
nodesDestroyNode(pLeftChildCond);
......@@ -720,7 +804,8 @@ static int32_t pushDownCondOptDealAgg(SOptimizeContext* pCxt, SAggLogicNode* pAg
// TODO: remove it after full implementation of pushing down to child
if (1 != LIST_LENGTH(pAgg->node.pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(nodesListGetNode(pAgg->node.pChildren, 0)) &&
QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(nodesListGetNode(pAgg->node.pChildren, 0))) {
return TSDB_CODE_SUCCESS;
}
......@@ -1460,6 +1545,7 @@ static int32_t rewriteTailOptCreateSort(SIndefRowsFuncLogicNode* pIndef, SLogicN
pSort->groupSort = rewriteTailOptNeedGroupSort(pIndef);
TSWAP(pSort->node.pChildren, pIndef->node.pChildren);
optResetParent((SLogicNode*)pSort);
pSort->node.precision = pIndef->node.precision;
SFunctionNode* pTail = NULL;
......@@ -1667,6 +1753,7 @@ static int32_t rewriteUniqueOptCreateAgg(SIndefRowsFuncLogicNode* pIndef, SLogic
}
TSWAP(pAgg->node.pChildren, pIndef->node.pChildren);
optResetParent((SLogicNode*)pAgg);
pAgg->node.precision = pIndef->node.precision;
int32_t code = TSDB_CODE_SUCCESS;
......
......@@ -612,10 +612,8 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
int32_t code = TSDB_CODE_SUCCESS;
pJoin->joinType = pJoinLogicNode->joinType;
if (NULL != pJoinLogicNode->pOnConditions) {
code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pOnConditions,
&pJoin->pOnConditions);
}
setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pMergeCondition,
&pJoin->pMergeCondition);
if (TSDB_CODE_SUCCESS == code) {
code = setListSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->node.pTargets,
&pJoin->pTargets);
......@@ -623,6 +621,21 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
if (TSDB_CODE_SUCCESS == code) {
code = addDataBlockSlots(pCxt, pJoin->pTargets, pJoin->node.pOutputDataBlockDesc);
}
SNodeList* condCols = nodesMakeList();
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) {
code = nodesCollectColumnsFromNode(pJoinLogicNode->pOnConditions, NULL, COLLECT_COL_TYPE_ALL, &condCols);
}
if (TSDB_CODE_SUCCESS == code) {
code = addDataBlockSlots(pCxt, condCols, pJoin->node.pOutputDataBlockDesc);
nodesDestroyList(condCols);
}
if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pOnConditions) {
code = setNodeSlotId(pCxt, ((SPhysiNode*)pJoin)->pOutputDataBlockDesc->dataBlockId, -1, pJoinLogicNode->pOnConditions,
&pJoin->pOnConditions);
}
if (TSDB_CODE_SUCCESS == code) {
code = setConditionsSlotId(pCxt, (const SLogicNode*)pJoinLogicNode, (SPhysiNode*)pJoin);
}
......
......@@ -1203,7 +1203,8 @@ typedef struct SQnodeSplitInfo {
static bool qndSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SLogicNode* pNode,
SQnodeSplitInfo* pInfo) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && NULL != pNode->pParent) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode) && NULL != pNode->pParent &&
((SScanLogicNode*)pNode)->scanSeq[0] < 1 && ((SScanLogicNode*)pNode)->scanSeq[1] < 1) {
pInfo->pSplitNode = pNode;
pInfo->pSubplan = pSubplan;
return true;
......@@ -1220,6 +1221,7 @@ static int32_t qnodeSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
if (!splMatch(pCxt, pSubplan, 0, (FSplFindSplitNode)qndSplFindSplitNode, &info)) {
return TSDB_CODE_SUCCESS;
}
((SScanLogicNode*)info.pSplitNode)->dataRequired = FUNC_DATA_REQUIRED_DATA_LOAD;
int32_t code = splCreateExchangeNodeForSubplan(pCxt, info.pSubplan, info.pSplitNode, info.pSubplan->subplanType);
if (TSDB_CODE_SUCCESS == code) {
SLogicSubplan* pScanSubplan = splCreateScanSubplan(pCxt, info.pSplitNode, 0);
......
......@@ -63,6 +63,10 @@ TEST_F(PlanBasicTest, uniqueFunc) {
run("SELECT UNIQUE(c2 + 10), ts, c2 FROM t1 WHERE c1 > 10");
run("SELECT UNIQUE(c1) a FROM t1 ORDER BY a");
run("SELECT ts, UNIQUE(c1) FROM st1 PARTITION BY TBNAME");
run("SELECT TBNAME, UNIQUE(c1) FROM st1 PARTITION BY TBNAME");
}
TEST_F(PlanBasicTest, tailFunc) {
......@@ -81,6 +85,8 @@ TEST_F(PlanBasicTest, tailFunc) {
run("SELECT TAIL(c2 + 10, 10, 80) FROM t1 WHERE c1 > 10 PARTITION BY c1 LIMIT 5");
run("SELECT TAIL(c1, 2, 1) FROM st1s1 UNION ALL SELECT c1 FROM st1s2");
run("SELECT TAIL(c1, 1) FROM st2 WHERE jtag->'tag1' > 10");
}
TEST_F(PlanBasicTest, interpFunc) {
......
......@@ -91,10 +91,3 @@ TEST_F(PlanOtherTest, delete) {
run("DELETE FROM st1 WHERE ts > now - 2d and ts < now - 1d AND tag1 = 10");
}
TEST_F(PlanOtherTest, queryPolicy) {
useDb("root", "test");
tsQueryPolicy = QUERY_POLICY_QNODE;
run("SELECT COUNT(*) FROM st1");
}
......@@ -78,6 +78,7 @@ static void parseArg(int argc, char* argv[]) {
{"skipSql", required_argument, NULL, 's'},
{"limitSql", required_argument, NULL, 'i'},
{"log", required_argument, NULL, 'l'},
{"queryPolicy", required_argument, NULL, 'q'},
{0, 0, 0, 0}
};
// clang-format on
......@@ -95,6 +96,9 @@ static void parseArg(int argc, char* argv[]) {
case 'l':
setLogLevel(optarg);
break;
case 'q':
setQueryPolicy(optarg);
break;
default:
break;
}
......
......@@ -24,6 +24,7 @@
#include "mockCatalogService.h"
#include "parser.h"
#include "planInt.h"
#include "tglobal.h"
using namespace std;
using namespace testing;
......@@ -53,6 +54,7 @@ DumpModule g_dumpModule = DUMP_MODULE_NOTHING;
int32_t g_skipSql = 0;
int32_t g_limitSql = 0;
int32_t g_logLevel = 131;
int32_t g_queryPolicy = QUERY_POLICY_VNODE;
void setDumpModule(const char* pModule) {
if (NULL == pModule) {
......@@ -79,6 +81,7 @@ void setDumpModule(const char* pModule) {
void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
void setLimitSqlNum(const char* pNum) { g_limitSql = stoi(pNum); }
void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
void setQueryPolicy(const char* pQueryPolicy) { g_queryPolicy = stoi(pQueryPolicy); }
int32_t getLogLevel() { return g_logLevel; }
......@@ -105,7 +108,23 @@ class PlannerTestBaseImpl {
}
++sqlNum_;
switch (g_queryPolicy) {
case QUERY_POLICY_VNODE:
case QUERY_POLICY_HYBRID:
case QUERY_POLICY_QNODE:
runImpl(sql, g_queryPolicy);
break;
default:
runImpl(sql, QUERY_POLICY_VNODE);
runImpl(sql, QUERY_POLICY_HYBRID);
runImpl(sql, QUERY_POLICY_QNODE);
break;
}
}
void runImpl(const string& sql, int32_t queryPolicy) {
reset();
tsQueryPolicy = queryPolicy;
try {
SQuery* pQuery = nullptr;
doParseSql(sql, &pQuery);
......
......@@ -45,6 +45,7 @@ extern void setDumpModule(const char* pModule);
extern void setSkipSqlNum(const char* pNum);
extern void setLimitSqlNum(const char* pNum);
extern void setLogLevel(const char* pLogLevel);
extern void setQueryPolicy(const char* pQueryPolicy);
extern int32_t getLogLevel();
#endif // PLAN_TEST_UTIL_H
......@@ -83,9 +83,9 @@ typedef struct SSyncSnapshotReceiver {
SSyncSnapshotReceiver *snapshotReceiverCreate(SSyncNode *pSyncNode, SRaftId fromId);
void snapshotReceiverDestroy(SSyncSnapshotReceiver *pReceiver);
int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg);
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver);
bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver);
int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg);
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver);
bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver);
cJSON *snapshotReceiver2Json(SSyncSnapshotReceiver *pReceiver);
char *snapshotReceiver2Str(SSyncSnapshotReceiver *pReceiver);
......
......@@ -65,6 +65,7 @@ int32_t syncInit() {
syncCleanUp();
ret = -1;
} else {
sDebug("sync rsetId:%" PRId64 " is open", tsNodeRefId);
ret = syncEnvStart();
}
}
......@@ -77,6 +78,7 @@ void syncCleanUp() {
ASSERT(ret == 0);
if (tsNodeRefId != -1) {
sDebug("sync rsetId:%" PRId64 " is closed", tsNodeRefId);
taosCloseRef(tsNodeRefId);
tsNodeRefId = -1;
}
......@@ -96,6 +98,7 @@ int64_t syncOpen(const SSyncInfo* pSyncInfo) {
return -1;
}
sDebug("vgId:%d, rid:%" PRId64 " is added to rsetId:%" PRId64, pSyncInfo->vgId, pSyncNode->rid, tsNodeRefId);
return pSyncNode->rid;
}
......@@ -136,13 +139,14 @@ void syncStartStandBy(int64_t rid) {
void syncStop(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
return;
}
if (pSyncNode == NULL) return;
int32_t vgId = pSyncNode->vgId;
syncNodeClose(pSyncNode);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
taosRemoveRef(tsNodeRefId, rid);
sDebug("vgId:%d, rid:%" PRId64 " is removed from rsetId:%" PRId64, vgId, rid, tsNodeRefId);
}
int32_t syncSetStandby(int64_t rid) {
......@@ -154,13 +158,13 @@ int32_t syncSetStandby(int64_t rid) {
}
if (pSyncNode->state != TAOS_SYNC_STATE_FOLLOWER) {
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
terrno = TSDB_CODE_SYN_IS_LEADER;
} else {
terrno = TSDB_CODE_SYN_STANDBY_NOT_READY;
}
sError("failed to set standby since it is not follower, state:%s rid:%" PRId64, syncStr(pSyncNode->state), rid);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
return -1;
}
......@@ -616,6 +620,7 @@ int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak) {
SSyncNode* pSyncNode = taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
taosReleaseRef(tsNodeRefId, rid);
terrno = TSDB_CODE_SYN_INTERNAL_ERROR;
return -1;
}
......
......@@ -14,6 +14,7 @@
*/
#include "syncRespMgr.h"
#include "syncRaftEntry.h"
#include "syncRaftStore.h"
SSyncRespMgr *syncRespMgrCreate(void *data, int64_t ttl) {
......@@ -116,4 +117,59 @@ void syncRespClean(SSyncRespMgr *pObj) {
taosThreadMutexUnlock(&(pObj->mutex));
}
void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {}
void syncRespCleanByTTL(SSyncRespMgr *pObj, int64_t ttl) {
SRespStub *pStub = (SRespStub *)taosHashIterate(pObj->pRespHash, NULL);
int cnt = 0;
SSyncNode *pSyncNode = pObj->data;
SArray *delIndexArray = taosArrayInit(0, sizeof(SyncIndex));
ASSERT(delIndexArray != NULL);
while (pStub) {
size_t len;
void *key = taosHashGetKey(pStub, &len);
SyncIndex *pIndex = (SyncIndex *)key;
int64_t nowMS = taosGetTimestampMs();
if (nowMS - pStub->createTime > ttl) {
taosArrayPush(delIndexArray, pIndex);
cnt++;
SSyncRaftEntry *pEntry = NULL;
int32_t code = 0;
if (pSyncNode->pLogStore != NULL) {
code = pSyncNode->pLogStore->syncLogGetEntry(pSyncNode->pLogStore, *pIndex, &pEntry);
if (code == 0 && pEntry != NULL) {
SFsmCbMeta cbMeta = {0};
cbMeta.index = pEntry->index;
cbMeta.lastConfigIndex = syncNodeGetSnapshotConfigIndex(pSyncNode, cbMeta.index);
cbMeta.isWeak = pEntry->isWeak;
cbMeta.code = TSDB_CODE_SYN_TIMEOUT;
cbMeta.state = pSyncNode->state;
cbMeta.seqNum = pEntry->seqNum;
cbMeta.term = pEntry->term;
cbMeta.currentTerm = pSyncNode->pRaftStore->currentTerm;
cbMeta.flag = 0;
SRpcMsg rpcMsg = pStub->rpcMsg;
rpcMsg.pCont = rpcMallocCont(pEntry->dataLen);
memcpy(rpcMsg.pCont, pEntry->data, pEntry->dataLen);
pSyncNode->pFsm->FpCommitCb(pSyncNode->pFsm, &rpcMsg, cbMeta);
syncEntryDestory(pEntry);
}
}
}
pStub = (SRespStub *)taosHashIterate(pObj->pRespHash, pStub);
}
int32_t arraySize = taosArrayGetSize(delIndexArray);
sDebug("vgId:%d, resp clean by ttl, cnt:%d, array-size:%d", pSyncNode->vgId, cnt, arraySize);
for (int32_t i = 0; i < arraySize; ++i) {
SyncIndex *pIndex = taosArrayGet(delIndexArray, i);
taosHashRemove(pObj->pRespHash, pIndex, sizeof(SyncIndex));
}
taosArrayDestroy(delIndexArray);
}
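The new syncRespCleanByTTL walks the pending-response hash once and, for every stub whose createTime is older than the TTL, replays the matching raft log entry through FpCommitCb with TSDB_CODE_SYN_TIMEOUT and records the key; the actual removal from the hash is deferred to a second pass, presumably so the map is not mutated while it is still being iterated. A minimal sketch of that iterate-collect-remove pattern follows; the flat array and field names are simplified stand-ins, not the real taosHash/SRespStub API.

```c
/* Minimal sketch of the iterate-collect-remove pattern used by
 * syncRespCleanByTTL. Types and the array are simplified stand-ins,
 * not the actual TDengine structures. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  int64_t index;       /* stands in for the SyncIndex hash key  */
  int64_t createTime;  /* ms timestamp, as in SRespStub         */
  int     live;        /* 1 while the entry is still in the map */
} Stub;

static void clean_by_ttl(Stub *stubs, int n, int64_t nowMs, int64_t ttlMs) {
  int64_t expired[16];
  int     cnt = 0;

  /* pass 1: iterate, handle the timeout, remember expired keys */
  for (int i = 0; i < n; ++i) {
    if (!stubs[i].live) continue;
    if (nowMs - stubs[i].createTime > ttlMs) {
      /* the real code builds SFsmCbMeta here and calls FpCommitCb
       * with TSDB_CODE_SYN_TIMEOUT for the matching raft log entry */
      expired[cnt++] = stubs[i].index;
    }
  }

  /* pass 2: remove only after the iteration has finished */
  for (int i = 0; i < cnt; ++i) {
    for (int j = 0; j < n; ++j) {
      if (stubs[j].live && stubs[j].index == expired[i]) stubs[j].live = 0;
    }
  }
  printf("cleaned %d of %d entries\n", cnt, n);
}

int main(void) {
  Stub stubs[] = {{1, 0, 1}, {2, 2500, 1}, {3, 2900, 1}};
  clean_by_ttl(stubs, 3, /*nowMs=*/3000, /*ttlMs=*/2000); /* cleans index 1 only */
  return 0;
}
```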
......@@ -22,9 +22,11 @@
#include "wal.h"
//----------------------------------
static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm,
SyncSnapshotSend *pBeginMsg);
static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg);
static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg);
static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg);
static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver);
static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg);
static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg);
//----------------------------------
SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaIndex) {
......@@ -68,7 +70,9 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
// close reader
if (pSender->pReader != NULL) {
int32_t ret = pSender->pSyncNode->pFsm->FpSnapshotStopRead(pSender->pSyncNode->pFsm, pSender->pReader);
ASSERT(ret == 0);
if (ret != 0) {
syncNodeErrorLog(pSender->pSyncNode, "stop reader error");
}
pSender->pReader = NULL;
}
......@@ -79,7 +83,12 @@ void snapshotSenderDestroy(SSyncSnapshotSender *pSender) {
bool snapshotSenderIsStart(SSyncSnapshotSender *pSender) { return pSender->start; }
// begin send snapshot by snapshot, pReader
// begin send snapshot by param, snapshot, pReader
//
// action:
// 1. assert reader not start
// 2. update state
// 3. send first snapshot block
int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapshotParam, SSnapshot snapshot,
void *pReader) {
ASSERT(!snapshotSenderIsStart(pSender));
......@@ -98,7 +107,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapsho
// update term
pSender->term = pSender->pSyncNode->pRaftStore->currentTerm;
++(pSender->privateTerm);
++(pSender->privateTerm); // increase private term
// update state
pSender->finish = false;
......@@ -114,9 +123,7 @@ int32_t snapshotSenderStart(SSyncSnapshotSender *pSender, SSnapshotParam snapsho
code = pSender->pSyncNode->pLogStore->syncLogGetEntry(pSender->pSyncNode->pLogStore,
pSender->snapshot.lastConfigIndex, &pEntry);
if (code == 0) {
ASSERT(pEntry != NULL);
if (code == 0 && pEntry != NULL) {
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
......@@ -207,6 +214,8 @@ int32_t snapshotSenderStop(SSyncSnapshotSender *pSender, bool finish) {
pSender->start = false;
pSender->finish = finish;
// do not update term, maybe print
// event log
do {
char *eventLog = snapshotSender2SimpleStr(pSender, "snapshot sender stop");
......@@ -243,6 +252,7 @@ int32_t snapshotSend(SSyncSnapshotSender *pSender) {
pMsg->srcId = pSender->pSyncNode->myRaftId;
pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex];
pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm;
pMsg->beginIndex = pSender->snapshotParam.start;
pMsg->lastIndex = pSender->snapshot.lastApplyIndex;
pMsg->lastTerm = pSender->snapshot.lastApplyTerm;
pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
......@@ -281,11 +291,13 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
pMsg->srcId = pSender->pSyncNode->myRaftId;
pMsg->destId = (pSender->pSyncNode->replicasId)[pSender->replicaIndex];
pMsg->term = pSender->pSyncNode->pRaftStore->currentTerm;
pMsg->beginIndex = pSender->snapshotParam.start;
pMsg->lastIndex = pSender->snapshot.lastApplyIndex;
pMsg->lastTerm = pSender->snapshot.lastApplyTerm;
pMsg->lastConfigIndex = pSender->snapshot.lastConfigIndex;
pMsg->lastConfig = pSender->lastConfig;
pMsg->seq = pSender->seq;
pMsg->privateTerm = pSender->privateTerm;
memcpy(pMsg->data, pSender->pCurrentBlock, pSender->blockLen);
// send msg
......@@ -305,6 +317,12 @@ int32_t snapshotReSend(SSyncSnapshotSender *pSender) {
return 0;
}
static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) {
ASSERT(pMsg->ack == pSender->seq);
pSender->ack = pMsg->ack;
++(pSender->seq);
}
cJSON *snapshotSender2Json(SSyncSnapshotSender *pSender) {
char u64buf[128];
cJSON *pRoot = cJSON_CreateObject();
......@@ -371,10 +389,11 @@ char *snapshotSender2SimpleStr(SSyncSnapshotSender *pSender, char *event) {
syncUtilU642Addr(destId.addr, host, sizeof(host), &port);
snprintf(s, len,
"%s {%p laindex:%ld laterm:%lu lcindex:%ld seq:%d ack:%d finish:%d pterm:%lu replica-index:%d %s:%d}", event,
pSender, pSender->snapshot.lastApplyIndex, pSender->snapshot.lastApplyTerm,
pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack, pSender->finish, pSender->privateTerm,
pSender->replicaIndex, host, port);
"%s {%p s-param:%ld e-param:%ld laindex:%ld laterm:%lu lcindex:%ld seq:%d ack:%d finish:%d pterm:%lu "
"replica-index:%d %s:%d}",
event, pSender, pSender->snapshotParam.start, pSender->snapshotParam.end, pSender->snapshot.lastApplyIndex,
pSender->snapshot.lastApplyTerm, pSender->snapshot.lastConfigIndex, pSender->seq, pSender->ack,
pSender->finish, pSender->privateTerm, pSender->replicaIndex, host, port);
return s;
}
......@@ -429,11 +448,10 @@ bool snapshotReceiverIsStart(SSyncSnapshotReceiver *pReceiver) { return pReceive
// static do start by privateTerm, pBeginMsg
// receive first snapshot data
// write first block data
static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm,
SyncSnapshotSend *pBeginMsg) {
static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) {
// update state
pReceiver->term = pReceiver->pSyncNode->pRaftStore->currentTerm;
pReceiver->privateTerm = privateTerm;
pReceiver->privateTerm = pBeginMsg->privateTerm;
pReceiver->ack = SYNC_SNAPSHOT_SEQ_BEGIN;
pReceiver->fromId = pBeginMsg->srcId;
pReceiver->start = true;
......@@ -445,7 +463,7 @@ static void snapshotReceiverDoStart(SSyncSnapshotReceiver *pReceiver, SyncTerm p
pReceiver->snapshotParam.start = pBeginMsg->beginIndex;
pReceiver->snapshotParam.end = pBeginMsg->lastIndex;
// write data
// start writer
ASSERT(pReceiver->pWriter == NULL);
int32_t ret = pReceiver->pSyncNode->pFsm->FpSnapshotStartWrite(pReceiver->pSyncNode->pFsm,
&(pReceiver->snapshotParam), &(pReceiver->pWriter));
......@@ -481,10 +499,10 @@ static void snapshotReceiverForceStop(SSyncSnapshotReceiver *pReceiver) {
// if receiver receive msg from seq = SYNC_SNAPSHOT_SEQ_BEGIN, start receiver
// if already start, force close, start again
int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm privateTerm, SyncSnapshotSend *pBeginMsg) {
int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pBeginMsg) {
if (!snapshotReceiverIsStart(pReceiver)) {
// first start
snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg);
snapshotReceiverDoStart(pReceiver, pBeginMsg);
} else {
// already start
......@@ -494,12 +512,14 @@ int32_t snapshotReceiverStart(SSyncSnapshotReceiver *pReceiver, SyncTerm private
snapshotReceiverForceStop(pReceiver);
// start again
snapshotReceiverDoStart(pReceiver, privateTerm, pBeginMsg);
snapshotReceiverDoStart(pReceiver, pBeginMsg);
}
return 0;
}
// just set start = false
// FpSnapshotStopWrite should not be called, assert writer == NULL
int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
if (pReceiver->pWriter != NULL) {
int32_t ret =
......@@ -522,6 +542,7 @@ int32_t snapshotReceiverStop(SSyncSnapshotReceiver *pReceiver) {
return 0;
}
// when recv last snapshot block, apply data into snapshot
static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
ASSERT(pMsg->seq == SYNC_SNAPSHOT_SEQ_END);
......@@ -550,7 +571,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
pReceiver->pSyncNode->commitIndex = pReceiver->snapshot.lastApplyIndex;
}
// stop writer
// stop writer, apply data
code = pReceiver->pSyncNode->pFsm->FpSnapshotStopWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter, true);
if (code != 0) {
syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error");
......@@ -579,15 +600,20 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
return 0;
}
// apply data block
// update progress
static void snapshotReceiverGotData(SSyncSnapshotReceiver *pReceiver, SyncSnapshotSend *pMsg) {
ASSERT(pMsg->seq == pReceiver->ack + 1);
if (pReceiver->pWriter != NULL) {
if (pMsg->dataLen > 0) {
// apply data block
int32_t code = pReceiver->pSyncNode->pFsm->FpSnapshotDoWrite(pReceiver->pSyncNode->pFsm, pReceiver->pWriter,
pMsg->data, pMsg->dataLen);
ASSERT(code == 0);
}
// update progress
pReceiver->ack = pMsg->seq;
// event log
......@@ -665,14 +691,23 @@ char *snapshotReceiver2SimpleStr(SSyncSnapshotReceiver *pReceiver, char *event)
uint16_t port;
syncUtilU642Addr(fromId.addr, host, sizeof(host), &port);
snprintf(s, len, "%s {%p start:%d ack:%d term:%lu pterm:%lu from:%s:%d laindex:%ld laterm:%lu lcindex:%ld}", event,
pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->privateTerm, host, port,
pReceiver->snapshot.lastApplyIndex, pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex);
snprintf(s, len,
"%s {%p start:%d ack:%d term:%lu pterm:%lu from:%s:%d s-param:%ld e-param:%ld laindex:%ld laterm:%lu "
"lcindex:%ld}",
event, pReceiver, pReceiver->start, pReceiver->ack, pReceiver->term, pReceiver->privateTerm, host, port,
pReceiver->snapshotParam.start, pReceiver->snapshotParam.end, pReceiver->snapshot.lastApplyIndex,
pReceiver->snapshot.lastApplyTerm, pReceiver->snapshot.lastConfigIndex);
return s;
}
// receiver do something
// receiver on message
//
// condition 1, recv SYNC_SNAPSHOT_SEQ_BEGIN, start receiver, update privateTerm
// condition 2, recv SYNC_SNAPSHOT_SEQ_END, finish receiver(apply snapshot data, update commit index, maybe reconfig)
// condition 3, recv SYNC_SNAPSHOT_SEQ_FORCE_CLOSE, force close
// condition 4, got data, update ack
//
int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
// get receiver
SSyncSnapshotReceiver *pReceiver = pSyncNode->pNewNodeReceiver;
......@@ -683,11 +718,13 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
if (pSyncNode->state == TAOS_SYNC_STATE_FOLLOWER) {
if (pMsg->term == pSyncNode->pRaftStore->currentTerm) {
if (pMsg->seq == SYNC_SNAPSHOT_SEQ_BEGIN) {
// condition 1
// begin, no data
snapshotReceiverStart(pReceiver, pMsg->privateTerm, pMsg);
snapshotReceiverStart(pReceiver, pMsg);
needRsp = true;
} else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_END) {
// condition 2
// end, finish FSM
code = snapshotReceiverFinish(pReceiver, pMsg);
if (code == 0) {
......@@ -697,7 +734,6 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
// maybe update lastconfig
if (pMsg->lastConfigIndex >= SYNC_INDEX_BEGIN) {
// int32_t oldReplicaNum = pSyncNode->replicaNum;
SSyncCfg oldSyncCfg = pSyncNode->pRaftCfg->cfg;
// update new config myIndex
......@@ -709,11 +745,13 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
}
} else if (pMsg->seq == SYNC_SNAPSHOT_SEQ_FORCE_CLOSE) {
// condition 3
// force close
snapshotReceiverForceStop(pReceiver);
needRsp = false;
} else if (pMsg->seq > SYNC_SNAPSHOT_SEQ_BEGIN && pMsg->seq < SYNC_SNAPSHOT_SEQ_END) {
// condition 4
// transferring
if (pMsg->seq == pReceiver->ack + 1) {
snapshotReceiverGotData(pReceiver, pMsg);
......@@ -752,6 +790,7 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
syncNodeSendMsgById(&(pRspMsg->destId), pSyncNode, &rpcMsg);
syncSnapshotRspDestroy(pRspMsg);
}
} else {
// error log
do {
......@@ -759,6 +798,8 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
syncNodeErrorLog(pSyncNode, eventLog);
taosMemoryFree(eventLog);
} while (0);
return -1;
}
} else {
// error log
......@@ -767,19 +808,19 @@ int32_t syncNodeOnSnapshotSendCb(SSyncNode *pSyncNode, SyncSnapshotSend *pMsg) {
syncNodeErrorLog(pSyncNode, eventLog);
taosMemoryFree(eventLog);
} while (0);
return -1;
}
return 0;
}
static void snapshotSenderUpdateProgress(SSyncSnapshotSender *pSender, SyncSnapshotRsp *pMsg) {
ASSERT(pMsg->ack == pSender->seq);
pSender->ack = pMsg->ack;
++(pSender->seq);
}
// sender receives ack, set seq = ack + 1, send msg from seq
// if ack == SYNC_SNAPSHOT_SEQ_END, stop sender
// sender on message
//
// condition 1 sender receives SYNC_SNAPSHOT_SEQ_END, close sender
// condition 2 sender receives ack, set seq = ack + 1, send msg from seq
// condition 3 sender receives error msg, just print error log
//
int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) {
// if already drop replica, do not process
if (!syncNodeInRaftGroup(pSyncNode, &(pMsg->srcId)) && pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
......@@ -794,12 +835,14 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) {
// state, term, seq/ack
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
if (pMsg->term == pSyncNode->pRaftStore->currentTerm) {
// receiver ack is finish, close sender
// condition 1
// received ack marks finish, close sender
if (pMsg->ack == SYNC_SNAPSHOT_SEQ_END) {
snapshotSenderStop(pSender, true);
return 0;
}
// condition 2
// send next msg
if (pMsg->ack == pSender->seq) {
// update sender ack
......@@ -807,6 +850,7 @@ int32_t syncNodeOnSnapshotRspCb(SSyncNode *pSyncNode, SyncSnapshotRsp *pMsg) {
snapshotSend(pSender);
} else if (pMsg->ack == pSender->seq - 1) {
// maybe resend
snapshotReSend(pSender);
} else {
......
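Taken together, the sender and receiver callbacks above implement a stop-and-wait transfer: the sender tags every block with a sequence number, the receiver acknowledges the sequence it has applied, and on each response the sender either stops (ack == SYNC_SNAPSHOT_SEQ_END), advances and sends the next block (ack == seq), or resends the block in flight (ack == seq - 1). The toy model below traces only that sender-side bookkeeping; SEQ_END is a placeholder value and the struct is not the real SSyncSnapshotSender.

```c
/* Toy model of the sender-side seq/ack bookkeeping shown in
 * snapshotSenderUpdateProgress and syncNodeOnSnapshotRspCb.
 * SEQ_END is a placeholder; the real constant is SYNC_SNAPSHOT_SEQ_END. */
#include <assert.h>
#include <stdio.h>

enum { SEQ_END = -1 };

typedef struct {
  int seq;  /* sequence number of the block currently in flight  */
  int ack;  /* last sequence number acknowledged by the receiver */
} Sender;

/* mirrors snapshotSenderUpdateProgress: ack must match the in-flight seq */
static void update_progress(Sender *s, int ackFromReceiver) {
  assert(ackFromReceiver == s->seq);
  s->ack = ackFromReceiver;
  ++s->seq; /* next block to send */
}

static void on_rsp(Sender *s, int ackFromReceiver) {
  if (ackFromReceiver == SEQ_END) {           /* condition 1: finished */
    printf("stop sender\n");
  } else if (ackFromReceiver == s->seq) {     /* condition 2: advance  */
    update_progress(s, ackFromReceiver);
    printf("send block seq=%d\n", s->seq);
  } else if (ackFromReceiver == s->seq - 1) { /* stale ack: resend     */
    printf("resend block seq=%d\n", s->seq);
  }
}

int main(void) {
  Sender s = {.seq = 0, .ack = -1};
  on_rsp(&s, 0);       /* ack matches -> send block 1       */
  on_rsp(&s, 0);       /* duplicate ack -> resend block 1   */
  on_rsp(&s, 1);       /* ack matches -> send block 2       */
  on_rsp(&s, SEQ_END); /* receiver finished -> stop sender  */
  return 0;
}
```

Running it prints send block seq=1, resend block seq=1, send block seq=2, stop sender, matching the three ack branches of syncNodeOnSnapshotRspCb.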
......@@ -8,7 +8,13 @@ void print(SHashObj *pNextIndex) {
printf("----------------\n");
uint64_t *p = (uint64_t *)taosHashIterate(pNextIndex, NULL);
while (p) {
printf("%lu \n", *p);
size_t len;
void* key = taosHashGetKey(p, &len);
SRaftId *pRaftId = (SRaftId*)key;
printf("key:<%lu, %d>, value:%lu \n", pRaftId->addr, pRaftId->vgId, *p);
p = (uint64_t *)taosHashIterate(pNextIndex, p);
}
}
......
......@@ -73,9 +73,15 @@ void syncRespMgrGetAndDelTest(uint64_t i) {
}
}
SSyncNode *createSyncNode() {
SSyncNode *pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
memset(pSyncNode, 0, sizeof(SSyncNode));
return pSyncNode;
}
void test1() {
printf("------- test1 ---------\n");
pMgr = syncRespMgrCreate(NULL, 0);
pMgr = syncRespMgrCreate(createSyncNode(), 0);
assert(pMgr != NULL);
syncRespMgrInsert(10);
......@@ -100,7 +106,7 @@ void test1() {
void test2() {
printf("------- test2 ---------\n");
pMgr = syncRespMgrCreate(NULL, 0);
pMgr = syncRespMgrCreate(createSyncNode(), 0);
assert(pMgr != NULL);
syncRespMgrInsert(10);
......@@ -117,7 +123,7 @@ void test2() {
void test3() {
printf("------- test3 ---------\n");
pMgr = syncRespMgrCreate(NULL, 0);
pMgr = syncRespMgrCreate(createSyncNode(), 0);
assert(pMgr != NULL);
syncRespMgrInsert(10);
......@@ -132,13 +138,34 @@ void test3() {
syncRespMgrDestroy(pMgr);
}
void test4() {
printf("------- test4 ---------\n");
pMgr = syncRespMgrCreate(createSyncNode(), 2);
assert(pMgr != NULL);
syncRespMgrInsert(5);
syncRespMgrPrint();
taosMsleep(3000);
syncRespMgrInsert(3);
syncRespMgrPrint();
printf("====== after clean ttl \n");
syncRespClean(pMgr);
syncRespMgrPrint();
syncRespMgrDestroy(pMgr);
}
int main() {
tsAsyncLog = 0;
sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
sDebugFlag = DEBUG_DEBUG + DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
logTest();
test1();
test2();
test3();
test4();
return 0;
}
......@@ -40,8 +40,8 @@ struct SBTree {
#define TDB_BTREE_PAGE_IS_ROOT(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_ROOT)
#define TDB_BTREE_PAGE_IS_LEAF(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_LEAF)
#define TDB_BTREE_PAGE_IS_OVFL(PAGE) (TDB_BTREE_PAGE_GET_FLAGS(PAGE) & TDB_BTREE_OVFL)
#define TDB_BTREE_ASSERT_FLAG(flags) \
ASSERT(TDB_FLAG_IS(flags, TDB_BTREE_ROOT) || TDB_FLAG_IS(flags, TDB_BTREE_LEAF) || \
#define TDB_BTREE_ASSERT_FLAG(flags) \
ASSERT(TDB_FLAG_IS(flags, TDB_BTREE_ROOT) || TDB_FLAG_IS(flags, TDB_BTREE_LEAF) || \
TDB_FLAG_IS(flags, TDB_BTREE_ROOT | TDB_BTREE_LEAF) || TDB_FLAG_IS(flags, 0) || \
TDB_FLAG_IS(flags, TDB_BTREE_OVFL))
......@@ -58,7 +58,7 @@ typedef struct {
static int tdbDefaultKeyCmprFn(const void *pKey1, int keyLen1, const void *pKey2, int keyLen2);
static int tdbBtreeOpenImpl(SBTree *pBt);
//static int tdbBtreeInitPage(SPage *pPage, void *arg, int init);
// static int tdbBtreeInitPage(SPage *pPage, void *arg, int init);
static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const void *pVal, int vLen, SCell *pCell,
int *szCell, TXN *pTxn, SBTree *pBt);
static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt);
......@@ -321,7 +321,7 @@ static int tdbBtreeOpenImpl(SBTree *pBt) {
{
// 1. TODO: Search the main DB to check if the DB exists
ret = tdbPagerOpenDB(pBt->pPager, &pgno, true);
ret = tdbPagerOpenDB(pBt->pPager, &pgno, true, pBt);
ASSERT(ret == 0);
}
......@@ -721,7 +721,8 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
int szNewCell;
SPgno pgno;
pgno = TDB_PAGE_PGNO(pNews[iNew]);
tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn, pBt);
tdbBtreeEncodeCell(pParent, cd.pKey, cd.kLen, (void *)&pgno, sizeof(SPgno), pNewCell, &szNewCell, pTxn,
pBt);
tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0);
tdbOsFree(pNewCell);
}
......@@ -916,10 +917,10 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno));
int nLocal = surplus <= maxLocal ? surplus : minLocal;
//int ofpCap = tdbPageCapacity(pBt->pageSize, sizeof(SIntHdr));
// int ofpCap = tdbPageCapacity(pBt->pageSize, sizeof(SIntHdr));
// fetch a new ofp and make it dirty
SPgno pgno = 0;
SPgno pgno = 0;
SPage *ofp, *nextOfp;
ret = tdbFetchOvflPage(&pgno, &ofp, pTxn, pBt);
......@@ -942,8 +943,8 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
nLeft -= kLen;
// pack partial val to local if any space left
if (nLocal > kLen + 4) {
memcpy(pCell + nHeader + kLen, pVal, nLocal - kLen - sizeof(SPgno));
nLeft -= nLocal - kLen - sizeof(SPgno);
memcpy(pCell + nHeader + kLen, pVal, nLocal - kLen - sizeof(SPgno));
nLeft -= nLocal - kLen - sizeof(SPgno);
}
// pack nextPgno
......@@ -951,132 +952,132 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
// pack left val data to ovpages
do {
lastPage = 0;
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
// fetch next ofp if not last page
if (!lastPage) {
// fetch a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
} else {
pgno = 0;
}
memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes);
memcpy(pBuf + bytes, &pgno, sizeof(pgno));
ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
ofp = nextOfp;
nLeft -= bytes;
lastPage = 0;
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
// fetch next ofp if not last page
if (!lastPage) {
// fetch a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
} else {
pgno = 0;
}
memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes);
memcpy(pBuf + bytes, &pgno, sizeof(pgno));
ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
ofp = nextOfp;
nLeft -= bytes;
} while (nLeft > 0);
} else {
int nLeftKey = kLen;
// pack partial key and nextPgno
memcpy(pCell + nHeader, pKey, nLocal - 4);
nLeft -= nLocal - 4;
nLeftKey -= nLocal -4;
nLeftKey -= nLocal - 4;
memcpy(pCell + nHeader + nLocal - 4, &pgno, sizeof(pgno));
int lastKeyPageSpace = 0;
// pack left key & val to ovpages
do {
// cal key to cpy
int lastKeyPage = 0;
if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeftKey;
lastKeyPage = 1;
lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
// cpy key
memcpy(pBuf, ((SCell *)pKey) + kLen - nLeftKey, bytes);
if (lastKeyPage) {
if (lastKeyPageSpace >= vLen) {
memcpy(pBuf + kLen -nLeftKey, pVal, vLen);
nLeft -= vLen;
pgno = 0;
} else {
memcpy(pBuf + kLen -nLeftKey, pVal, lastKeyPageSpace);
nLeft -= lastKeyPageSpace;
// fetch next ofp, a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
return -1;
}
}
} else {
// fetch next ofp, a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
return -1;
}
}
memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno));
ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0);
if (ret < 0) {
return -1;
}
ofp = nextOfp;
nLeftKey -= bytes;
nLeft -= bytes;
// cal key to cpy
int lastKeyPage = 0;
if (nLeftKey <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeftKey;
lastKeyPage = 1;
lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
// cpy key
memcpy(pBuf, ((SCell *)pKey) + kLen - nLeftKey, bytes);
if (lastKeyPage) {
if (lastKeyPageSpace >= vLen) {
memcpy(pBuf + kLen - nLeftKey, pVal, vLen);
nLeft -= vLen;
pgno = 0;
} else {
memcpy(pBuf + kLen - nLeftKey, pVal, lastKeyPageSpace);
nLeft -= lastKeyPageSpace;
// fetch next ofp, a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
return -1;
}
}
} else {
// fetch next ofp, a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
return -1;
}
}
memcpy(pBuf + kLen - nLeft, &pgno, sizeof(pgno));
ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0);
if (ret < 0) {
return -1;
}
ofp = nextOfp;
nLeftKey -= bytes;
nLeft -= bytes;
} while (nLeftKey > 0);
while (nLeft > 0) {
// pack left val data to ovpages
lastPage = 0;
if (nLeft <= maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = maxLocal - sizeof(SPgno);
}
// fetch next ofp if not last page
if (!lastPage) {
// fetch a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
} else {
pgno = 0;
}
memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes);
memcpy(pBuf + bytes, &pgno, sizeof(pgno));
ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
ofp = nextOfp;
nLeft -= bytes;
// pack left val data to ovpages
lastPage = 0;
if (nLeft <= maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = maxLocal - sizeof(SPgno);
}
// fetch next ofp if not last page
if (!lastPage) {
// fetch a new ofp and make it dirty
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
} else {
pgno = 0;
}
memcpy(pBuf, ((SCell *)pVal) + vLen - nLeft, bytes);
memcpy(pBuf + bytes, &pgno, sizeof(pgno));
ret = tdbPageInsertCell(ofp, 0, pBuf, bytes + sizeof(pgno), 0);
if (ret < 0) {
tdbFree(pBuf);
return -1;
}
ofp = nextOfp;
nLeft -= bytes;
}
}
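The split between in-page and overflow payload is driven by the two lines near the top of this function: surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno)) and nLocal = surplus <= maxLocal ? surplus : minLocal. The worked example below plugs in made-up limits (the real maxLocal/minLocal come from the page size and are not shown in this diff) just to make the arithmetic concrete.

```c
/* Worked example of the nLocal computation shared by
 * tdbBtreeEncodePayload and tdbBtreeDecodePayload.
 * maxLocal/minLocal below are illustrative assumptions only. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t SPgno;

int main(void) {
  int maxLocal = 1008;  /* assumed per-cell local capacity          */
  int minLocal = 252;   /* assumed minimum payload kept in the page */
  int nHeader  = 10;    /* cell header bytes                        */
  int nPayload = 5000;  /* key + value bytes                        */

  int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - (int)sizeof(SPgno));
  int nLocal  = surplus <= maxLocal ? surplus : minLocal;

  /* surplus = 252 + (5010 - 252) % 1004 = 252 + 742 = 994, which is
   * <= maxLocal, so nLocal = 994: that much of the cell stays in the
   * page and the remaining payload spills to overflow pages, each
   * chained by a trailing SPgno (the loops in the function above). */
  printf("surplus=%d nLocal=%d\n", surplus, nLocal);
  return 0;
}
```

If surplus exceeded maxLocal, nLocal would fall back to minLocal and correspondingly more of the payload would go to the overflow-page chain.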
......@@ -1142,7 +1143,8 @@ static int tdbBtreeEncodeCell(SPage *pPage, const void *pKey, int kLen, const vo
return 0;
}
static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn, SBTree *pBt) {
static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader, SCellDecoder *pDecoder, TXN *pTxn,
SBTree *pBt) {
int ret = 0;
int nPayload;
int maxLocal = pPage->maxLocal;
......@@ -1171,149 +1173,149 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
int surplus = minLocal + (nPayload + nHeader - minLocal) % (maxLocal - sizeof(SPgno));
int nLocal = surplus <= maxLocal ? surplus : minLocal;
int nLeft = nPayload;
SPgno pgno = 0;
int nLeft = nPayload;
SPgno pgno = 0;
SPage *ofp;
SCell *ofpCell;
int bytes;
int lastPage = 0;
int bytes;
int lastPage = 0;
if (nLocal >= pDecoder->kLen + 4) {
pDecoder->pKey = (SCell *)pCell + nHeader;
nLeft -= kLen;
if (nLocal > kLen + 4) {
// read partial val to local
pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen);
if (pDecoder->pVal == NULL) {
return -1;
}
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
// read partial val to local
pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen);
if (pDecoder->pVal == NULL) {
return -1;
}
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno));
memcpy(pDecoder->pVal, pCell + nHeader + kLen, nLocal - kLen - sizeof(SPgno));
nLeft -= nLocal - kLen - sizeof(SPgno);
nLeft -= nLocal - kLen - sizeof(SPgno);
}
memcpy(&pgno, pCell + nHeader + nPayload - nLeft, sizeof(pgno));
// unpack left val data from ovpages
while (pgno != 0) {
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
while (pgno != 0) {
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
ofpCell = tdbPageGetCell(ofp, 0);
ofpCell = tdbPageGetCell(ofp, 0);
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
memcpy(pDecoder->pVal + vLen - nLeft, ofpCell, bytes);
nLeft -= bytes;
memcpy(pDecoder->pVal + vLen - nLeft, ofpCell, bytes);
nLeft -= bytes;
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
}
} else {
int nLeftKey = kLen;
// load partial key and nextPgno
pDecoder->pKey = tdbRealloc(pDecoder->pKey, kLen);
if (pDecoder->pKey == NULL) {
return -1;
return -1;
}
TDB_CELLDECODER_SET_FREE_KEY(pDecoder);
memcpy(pDecoder->pKey, pCell + nHeader, nLocal - 4);
nLeft -= nLocal - 4;
nLeftKey -= nLocal -4;
nLeftKey -= nLocal - 4;
memcpy(&pgno, pCell + nHeader + nLocal - 4, sizeof(pgno));
int lastKeyPageSpace = 0;
// load left key & val to ovpages
while (pgno != 0) {
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
ofpCell = tdbPageGetCell(ofp, 0);
int lastKeyPage = 0;
if (nLeftKey <= maxLocal - sizeof(SPgno)) {
bytes = nLeftKey;
lastKeyPage = 1;
lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
// cpy key
memcpy(pDecoder->pKey + kLen - nLeftKey, ofpCell, bytes);
if (lastKeyPage) {
if (lastKeyPageSpace >= vLen) {
pDecoder->pVal = ofpCell + kLen -nLeftKey;
nLeft -= vLen;
pgno = 0;
} else {
// read partial val to local
pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen);
if (pDecoder->pVal == NULL) {
return -1;
}
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
memcpy(pDecoder->pVal, ofpCell + kLen -nLeftKey, lastKeyPageSpace);
nLeft -= lastKeyPageSpace;
}
}
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
nLeftKey -= bytes;
nLeft -= bytes;
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
ofpCell = tdbPageGetCell(ofp, 0);
int lastKeyPage = 0;
if (nLeftKey <= maxLocal - sizeof(SPgno)) {
bytes = nLeftKey;
lastKeyPage = 1;
lastKeyPageSpace = ofp->maxLocal - sizeof(SPgno) - nLeftKey;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
// cpy key
memcpy(pDecoder->pKey + kLen - nLeftKey, ofpCell, bytes);
if (lastKeyPage) {
if (lastKeyPageSpace >= vLen) {
pDecoder->pVal = ofpCell + kLen - nLeftKey;
nLeft -= vLen;
pgno = 0;
} else {
// read partial val to local
pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen);
if (pDecoder->pVal == NULL) {
return -1;
}
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
memcpy(pDecoder->pVal, ofpCell + kLen - nLeftKey, lastKeyPageSpace);
nLeft -= lastKeyPageSpace;
}
}
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
nLeftKey -= bytes;
nLeft -= bytes;
}
while (nLeft > 0) {
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
ofpCell = tdbPageGetCell(ofp, 0);
// load left val data to ovpages
lastPage = 0;
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
if (lastPage) {
pgno = 0;
}
if (!pDecoder->pVal) {
pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen);
if (pDecoder->pVal == NULL) {
return -1;
}
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
}
memcpy(pDecoder->pVal, ofpCell + vLen - nLeft, bytes);
nLeft -= bytes;
memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno));
nLeft -= bytes;
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
ofpCell = tdbPageGetCell(ofp, 0);
// load left val data to ovpages
lastPage = 0;
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
lastPage = 1;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
if (lastPage) {
pgno = 0;
}
if (!pDecoder->pVal) {
pDecoder->pVal = tdbRealloc(pDecoder->pVal, vLen);
if (pDecoder->pVal == NULL) {
return -1;
}
TDB_CELLDECODER_SET_FREE_VAL(pDecoder);
}
memcpy(pDecoder->pVal, ofpCell + vLen - nLeft, bytes);
nLeft -= bytes;
memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno));
nLeft -= bytes;
}
}
}
......@@ -1404,31 +1406,31 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN *
// free ofp pages' cells
if (dropOfp) {
int ret = 0;
SPgno pgno = *(SPgno *) (pCell + nHeader + nLocal - sizeof(SPgno));
int nLeft = nPayload - nLocal + sizeof(SPgno);
int ret = 0;
SPgno pgno = *(SPgno *)(pCell + nHeader + nLocal - sizeof(SPgno));
int nLeft = nPayload - nLocal + sizeof(SPgno);
SPage *ofp;
int bytes;
int bytes;
while (pgno != 0) {
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
SCell *ofpCell = tdbPageGetCell(ofp, 0);
SCell *ofpCell = tdbPageGetCell(ofp, 0);
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
if (nLeft <= ofp->maxLocal - sizeof(SPgno)) {
bytes = nLeft;
} else {
bytes = ofp->maxLocal - sizeof(SPgno);
}
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
tdbPagerReturnPage(pPage->pPager, ofp, pTxn);
tdbPagerReturnPage(pPage->pPager, ofp, pTxn);
nLeft -= bytes;
nLeft -= bytes;
}
}
......@@ -1932,7 +1934,7 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
// alloc space
szBuf = kLen + nData + 14;
pBuf = tdbRealloc(pBtc->pBt->pBuf, pBtc->pBt->pageSize > szBuf ? szBuf : pBtc->pBt->pageSize);
pBuf = tdbRealloc(pBtc->pBt->pBuf, pBtc->pBt->pageSize > szBuf ? szBuf : pBtc->pBt->pageSize);
if (pBuf == NULL) {
ASSERT(0);
return -1;
......
......@@ -98,7 +98,7 @@ int tdbPagerClose(SPager *pPager) {
return 0;
}
int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) {
int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt) {
SPgno pgno;
SPage *pPage;
int ret;
......@@ -110,25 +110,41 @@ int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate) {
}
{
// TODO: try to search the main DB to get the page number
// pgno = 0;
// TODO: try to search the main DB to get the page number
// pgno = 0;
}
// if (pgno == 0 && toCreate) {
// ret = tdbPagerAllocPage(pPager, &pPage, &pgno);
// if (ret < 0) {
// return -1;
// }
if (pgno == 0 && toCreate) {
// allocate a new child page
TXN txn;
tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0);
// // TODO: Need to zero the page
pPager->inTran = 1;
// ret = tdbPagerWrite(pPager, pPage);
// if (ret < 0) {
// return -1;
// }
// }
SBtreeInitPageArg zArg;
zArg.flags = 0x1 | 0x2; // root leaf node;
zArg.pBt = pBt;
ret = tdbPagerFetchPage(pPager, &pgno, &pPage, tdbBtreeInitPage, &zArg, &txn);
if (ret < 0) {
return -1;
}
// ret = tdbPagerAllocPage(pPager, &pPage, &pgno);
// if (ret < 0) {
// return -1;
//}
// TODO: Need to zero the page
ret = tdbPagerWrite(pPager, pPage);
if (ret < 0) {
return -1;
}
*ppgno = pgno;
tdbTxnClose(&txn);
}
*ppgno = pgno;
return 0;
}
......@@ -427,9 +443,9 @@ static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) {
}
int tdbPagerRestore(SPager *pPager, SBTree *pBt) {
int ret = 0;
int ret = 0;
SPgno journalSize = 0;
u8 *pageBuf = NULL;
u8 *pageBuf = NULL;
tdb_fd_t jfd = tdbOsOpen(pPager->jFileName, TDB_O_RDWR, 0755);
if (jfd == NULL) {
......@@ -454,7 +470,7 @@ int tdbPagerRestore(SPager *pPager, SBTree *pBt) {
for (int pgIndex = 0; pgIndex < journalSize; ++pgIndex) {
// read pgno & the page from journal
SPgno pgno;
SPgno pgno;
SPage *pPage;
int ret = tdbOsRead(jfd, &pgno, sizeof(pgno));
......
......@@ -128,13 +128,13 @@ typedef struct SBtInfo {
#define TDB_CELLDECODER_FREE_VAL(pCellDecoder) ((pCellDecoder)->freeKV & TDB_CELLD_F_VAL)
typedef struct {
int kLen;
u8 *pKey;
int vLen;
u8 *pVal;
SPgno pgno;
u8 *pBuf;
u8 freeKV;
int kLen;
u8 *pKey;
int vLen;
u8 *pVal;
SPgno pgno;
u8 *pBuf;
u8 freeKV;
} SCellDecoder;
struct SBTC {
......@@ -184,7 +184,7 @@ int tdbBtcUpsert(SBTC *pBtc, const void *pKey, int kLen, const void *pData, int
int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager);
int tdbPagerClose(SPager *pPager);
int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate);
int tdbPagerOpenDB(SPager *pPager, SPgno *ppgno, bool toCreate, SBTree *pBt);
int tdbPagerWrite(SPager *pPager, SPage *pPage);
int tdbPagerBegin(SPager *pPager, TXN *pTxn);
int tdbPagerCommit(SPager *pPager, TXN *pTxn);
......@@ -192,7 +192,7 @@ int tdbPagerFetchPage(SPager *pPager, SPgno *ppgno, SPage **ppPage, int (*initP
TXN *pTxn);
void tdbPagerReturnPage(SPager *pPager, SPage *pPage, TXN *pTxn);
int tdbPagerAllocPage(SPager *pPager, SPgno *ppgno);
int tdbPagerRestore(SPager *pPager, SBTree *pBt);
int tdbPagerRestore(SPager *pPager, SBTree *pBt);
// tdbPCache.c ====================================
#define TDB_PCACHE_PAGE \
......@@ -314,19 +314,18 @@ static inline int tdbTryLockPage(tdb_spinlock_t *pLock) {
#define TDB_TRY_LOCK_PAGE(pPage) tdbTryLockPage(&((pPage)->lock))
// APIs
#define TDB_PAGE_TOTAL_CELLS(pPage) ((pPage)->nOverflow + (pPage)->pPageMethods->getCellNum(pPage))
#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx)
#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage)
#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno)
#define TDB_BYTES_CELL_TAKEN(pPage, pCell) ((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset)
#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset)
#define TDB_PAGE_TOTAL_CELLS(pPage) ((pPage)->nOverflow + (pPage)->pPageMethods->getCellNum(pPage))
#define TDB_PAGE_USABLE_SIZE(pPage) ((u8 *)(pPage)->pPageFtr - (pPage)->pCellIdx)
#define TDB_PAGE_FREE_SIZE(pPage) (*(pPage)->pPageMethods->getFreeBytes)(pPage)
#define TDB_PAGE_PGNO(pPage) ((pPage)->pgid.pgno)
#define TDB_BYTES_CELL_TAKEN(pPage, pCell) \
((*(pPage)->xCellSize)(pPage, pCell, 0, NULL, NULL) + (pPage)->pPageMethods->szOffset)
#define TDB_PAGE_OFFSET_SIZE(pPage) ((pPage)->pPageMethods->szOffset)
int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t), void *arg);
int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg);
void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int,
TXN *, SBTree *pBt));
void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int,
TXN *, SBTree *pBt));
void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt));
void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt));
int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl);
int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt);
int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt);
......
......@@ -402,8 +402,9 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
} else {
pHead->msgType = pMsg->msgType;
// set up resp msg type
if (pHead->msgType == 0 && transMsgLenFromCont(pMsg->contLen) == sizeof(STransMsgHead))
if (pHead->msgType == 0 && transMsgLenFromCont(pMsg->contLen) == sizeof(STransMsgHead)) {
pHead->msgType = pConn->inType + 1;
}
}
}
......
......@@ -637,6 +637,7 @@ void dBufSetBufPageRecycled(SDiskbasedBuf* pBuf, void* pPage) {
SListNode* pNode = tdListPopNode(pBuf->lruList, ppi->pn);
taosMemoryFreeClear(ppi->pData);
taosMemoryFreeClear(pNode);
ppi->pn = NULL;
tdListAppend(pBuf->freePgList, &ppi);
}
......
......@@ -14,7 +14,7 @@ sql_error alter table db.stb MODIFY tag ts int
sql_error alter table db.stb MODIFY tag t2 binary(3)
sql_error alter table db.stb MODIFY tag t2 int
sql_error alter table db.stb MODIFY tag t1 int
sql create table db.ctb using db.stb tags(101, "12345")
sql create table db.ctb using db.stb tags(101, "123")
sql insert into db.ctb values(now, 1, "1234")
sql select * from db.stb
......@@ -32,7 +32,7 @@ endi
if $data[0][3] != 101 then
return -1
endi
if $data[0][4] != 1234 then
if $data[0][4] != 123 then
return -1
endi
......
......@@ -14,7 +14,7 @@ sql_error alter table db.stb rename tag ts c3
sql_error alter table db.stb rename tag t2 t1
sql_error alter table db.stb rename tag t2 t2
sql_error alter table db.stb rename tag t1 t2
sql create table db.ctb using db.stb tags(101, "12345")
sql create table db.ctb using db.stb tags(101, "123")
sql insert into db.ctb values(now, 1, "1234")
sql select * from db.stb
......@@ -32,7 +32,7 @@ endi
if $data[0][3] != 101 then
return -1
endi
if $data[0][4] != 1234 then
if $data[0][4] != 123 then
return -1
endi
......@@ -56,7 +56,7 @@ endi
if $data[0][3] != 101 then
return -1
endi
if $data[0][4] != 1234 then
if $data[0][4] != 123 then
return -1
endi
......
......@@ -26,8 +26,8 @@ sql show users
sql alter user u1 sysinfo 1
sql alter user u1 enable 1
sql alter user u1 pass 'taosdata'
sql alter user u2 sysinfo 0
sql drop user u1
sql_error alter user u2 sysinfo 0
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start -v
sql connect
print =============== step1: create drop show dnodes
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
if $rows != 1 then
return -1
endi
print =============== step2: create db
sql create database db vgroups 1
_OVER:
system sh/exec.sh -n dnode1 -s stop -x SIGINT
......@@ -14,7 +14,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
def prepare_datas(self):
tdSql.execute(
'''create table stb1
......@@ -22,7 +22,7 @@ class TDTestCase:
tags (t1 int)
'''
)
tdSql.execute(
'''
create table t1
......@@ -64,7 +64,7 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def check_avg(self ,origin_query , check_query):
avg_result = tdSql.getResult(origin_query)
origin_result = tdSql.getResult(check_query)
......@@ -73,13 +73,13 @@ class TDTestCase:
for row_index , row in enumerate(avg_result):
for col_index , elem in enumerate(row):
if avg_result[row_index][col_index] != origin_result[row_index][col_index]:
check_status = False
check_status = False
if not check_status:
tdLog.notice("avg function value has not as expected , sql is \"%s\" "%origin_query )
sys.exit(1)
else:
tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
def test_errors(self):
error_sql_lists = [
"select avg from t1",
......@@ -113,42 +113,42 @@ class TDTestCase:
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
type_error_sql_lists = [
"select avg(ts) from t1" ,
"select avg(ts) from t1" ,
"select avg(c7) from t1",
"select avg(c8) from t1",
"select avg(c9) from t1",
"select avg(ts) from ct1" ,
"select avg(ts) from ct1" ,
"select avg(c7) from ct1",
"select avg(c8) from ct1",
"select avg(c9) from ct1",
"select avg(ts) from ct3" ,
"select avg(ts) from ct3" ,
"select avg(c7) from ct3",
"select avg(c8) from ct3",
"select avg(c9) from ct3",
"select avg(ts) from ct4" ,
"select avg(ts) from ct4" ,
"select avg(c7) from ct4",
"select avg(c8) from ct4",
"select avg(c9) from ct4",
"select avg(ts) from stb1" ,
"select avg(ts) from stb1" ,
"select avg(c7) from stb1",
"select avg(c8) from stb1",
"select avg(c9) from stb1" ,
"select avg(ts) from stbbb1" ,
"select avg(ts) from stbbb1" ,
"select avg(c7) from stbbb1",
"select avg(ts) from tbname",
"select avg(c9) from tbname"
]
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
type_sql_lists = [
"select avg(c1) from t1",
"select avg(c2) from t1",
......@@ -178,16 +178,16 @@ class TDTestCase:
"select avg(c5) from stb1",
"select avg(c6) from stb1",
"select avg(c6) as alisb from stb1",
"select avg(c6) alisb from stb1",
"select avg(c6) as alisb from stb1",
"select avg(c6) alisb from stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def basic_avg_function(self):
# basic query
# basic query
tdSql.query("select c1 from ct3")
tdSql.checkRows(0)
tdSql.query("select c1 from t1")
......@@ -207,18 +207,18 @@ class TDTestCase:
tdSql.query("select avg(c5) from ct3")
tdSql.checkRows(0)
tdSql.query("select avg(c6) from ct3")
# used for regular table
tdSql.query("select avg(c1) from t1")
tdSql.checkData(0, 0, 5.000000000)
tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
self.check_avg(" select avg(c1) , avg(c2) , avg(c3) from t1 " , " select sum(c1)/count(c1) , sum(c2)/count(c2) , sum(c3)/count(c3) from t1 ")
# used for sub table
tdSql.query("select avg(c1) from ct1")
tdSql.checkData(0, 0, 4.846153846)
......@@ -229,8 +229,8 @@ class TDTestCase:
self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from t1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from t1 ")
self.check_avg(" select avg(abs(c1)) , avg(abs(c2)) , avg(abs(c3)) from stb1 " , " select sum(abs(c1))/count(c1) , sum(abs(c2))/count(c2) , sum(abs(c3))/count(c3) from stb1 ")
# used for stable table
# used for stable table
tdSql.query("select avg(c1) from stb1")
tdSql.checkRows(1)
......@@ -241,10 +241,10 @@ class TDTestCase:
tdSql.error("select avg(c1) from tbname")
tdSql.error("select avg(c1) from ct5")
# mix with common col
# mix with common col
tdSql.error("select c1, avg(c1) from ct1")
tdSql.error("select c1, avg(c1) from ct4")
# mix with common functions
tdSql.error("select c1, avg(c1),c5, floor(c5) from ct4 ")
......@@ -278,11 +278,11 @@ class TDTestCase:
tdSql.query("select count(*) from stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
# bug fix for compute
tdSql.error("select c1, avg(c1) -0 ,ceil(c1)-0 from ct4 ")
tdSql.error(" select c1, avg(c1) -0 ,avg(ceil(c1-0.1))-0.1 from ct4")
# mix with nest query
# mix with nest query
self.check_avg("select avg(col) from (select abs(c1) col from stb1)" , "select avg(abs(c1)) from stb1")
self.check_avg("select avg(col) from (select ceil(abs(c1)) col from stb1)" , "select avg(abs(c1)) from stb1")
......@@ -297,7 +297,7 @@ class TDTestCase:
tdSql.query(" select avg(c1) from stb1 where c1 is null ")
tdSql.checkRows(0)
def avg_func_filter(self):
tdSql.execute("use db")
tdSql.query(" select avg(c1), avg(c1) -0 ,avg(ceil(c1-0.1))-0 ,avg(floor(c1+0.1))-0.1 ,avg(ceil(log(c1,2)-0.5)) from ct4 where c1>5 ")
......@@ -324,7 +324,7 @@ class TDTestCase:
def avg_Arithmetic(self):
pass
def check_boundary_values(self):
tdSql.execute("drop database if exists bound_test")
......@@ -344,11 +344,11 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into sub1_bound values ( now(), 2147483645, 9223372036854775805, 32765, 125, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
f"insert into sub1_bound values ( now(), 2147483644, 9223372036854775804, 32764, 124, 3.40E+37, 1.7e+307, True, 'binary_tb1', 'nchar_tb1', now() )"
)
......@@ -359,14 +359,14 @@ class TDTestCase:
tdSql.execute(
f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
self.check_avg("select avg(c1), avg(c2), avg(c3) , avg(c4), avg(c5) ,avg(c6) from sub1_bound " , " select sum(c1)/count(c1), sum(c2)/count(c2) ,sum(c3)/count(c3), sum(c4)/count(c4), sum(c5)/count(c5) ,sum(c6)/count(c6) from sub1_bound ")
# check basic elem for table per row
tdSql.query("select avg(c1) ,avg(c2) , avg(c3) , avg(c4), avg(c5), avg(c6) from sub1_bound ")
tdSql.checkRows(1)
......@@ -376,8 +376,8 @@ class TDTestCase:
tdSql.checkData(0,3,53.571428571)
tdSql.checkData(0,4,5.828571332045761e+37)
# tdSql.checkData(0,5,None)
# check + - * / in functions
tdSql.query(" select avg(c1+1) ,avg(c2) , avg(c3*1) , avg(c4/2), avg(c5)/2, avg(c6) from sub1_bound ")
tdSql.checkData(0,0,920350134.5714285)
......@@ -386,33 +386,33 @@ class TDTestCase:
tdSql.checkData(0,3,26.785714286)
tdSql.checkData(0,4,2.9142856660228804e+37)
# tdSql.checkData(0,5,None)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: avg basic query ============")
tdLog.printNoPrefix("==========step4: avg basic query ============")
self.basic_avg_function()
tdLog.printNoPrefix("==========step5: avg boundary query ============")
tdLog.printNoPrefix("==========step5: avg boundary query ============")
self.check_boundary_values()
tdLog.printNoPrefix("==========step6: avg filter query ============")
tdLog.printNoPrefix("==========step6: avg filter query ============")
self.avg_func_filter()
......
......@@ -687,8 +687,8 @@ class TDTestCase:
# to_json()
tdSql.query("select to_json('{\"abc\":123}') from jsons1_1")
tdSql.checkRows(2)
# tdSql.checkData(0, 0, '{"abc":123}')
# tdSql.checkData(1, 0, '{"abc":123}')
tdSql.checkData(0, 0, '{"abc":123}')
tdSql.checkData(1, 0, '{"abc":123}')
tdSql.query("select to_json('null') from jsons1_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 'null')
......
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 100000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 100000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
firstConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
secondConsumeRows = resultList[0]
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
thirdConsumeRows = resultList[0]
if not (totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# total consume
actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 150
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 200,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.starttaosd(1)
return
def tmqCase1(self):
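# case 1: a single consumer reads the whole filtered topic from tsdb (snapshot mode); the
# consumed row count must exactly match the rows returned by running the query directly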
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
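# case 2: two consumers of the same group consume the filtered topic back to back; the second
# run resumes from the committed offset, so first + second consumed rows must equal the query total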
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 20,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5))
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
tdLog.info("select result rows: %d"%totalRowsInserted)
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(totalRowsInserted*2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 4
self.ctbNum = 3000
self.rowsPerTbl = 70
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
# tdDnodes.start(1)
tdDnodes.starttaosd(1)
return
def tmqCase1(self):
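# case 1: the topic is defined as "select *" on the super table; one consumer must read exactly
# as many rows as the query returns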
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("%d tmq consume rows error!"%consumerId)
# tmqCom.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
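# case 2: the same "select *" topic is consumed by two consumers of one group in sequence; the
# second picks up at the committed offset and the two counts together must equal the total row count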
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 1
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] / 3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]):
tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
firstConsumeRows = resultList[0]
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0]))
tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase1()
self.tmqCase2()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
import taos
import sys
import time
import socket
import os
import threading
import math
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 1
self.rowsPerTbl = 1000000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 10,
'rowsPerTbl': 10000,
'batchNum': 1000,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
tdDnodes.stop(1)
tdDnodes.start(1)
return
def tmqCase3(self):
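# case 3: register two consumers of the same group before starting tmq_sim so they run
# concurrently; together they must consume exactly the rows returned by the filtered query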
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 15,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 3
expectrowcnt = math.ceil(totalRowsInserted/3)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:1000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
consumerId = 4
expectrowcnt = math.ceil(totalRowsInserted * 2/3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0] + resultList[1]
if not (totalRowsInserted == actConsumeTotalRows):
tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 3 end ...... ")
def tmqCase4(self):
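# case 4: kill the first consumer right after its first offset commit, then start a second
# consumer in the same group; it should resume from the committed offset and therefore read
# more than zero but fewer rows than the full query result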
tdLog.printNoPrefix("======== test case 4: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 25,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
topicNameList = ['topic1']
expectRowsList = []
tmqCom.initConsumerTable()
tdLog.info("create topics from stb with filter")
queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379)
# queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
# sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
totalRowsInserted = expectRowsList[0]
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 5
expectrowcnt = math.ceil(totalRowsInserted)
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:500, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 0")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")
# time.sleep(10)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
actConsumeTotalRows = resultList[0]
if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted):
tdLog.info("act consume rows: %d"%(actConsumeTotalRows))
tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted))
tdLog.exit("%d tmq consume rows error!"%consumerId)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
tdSql.prepare()
self.prepareTestEnv()
self.tmqCase3()
self.tmqCase4()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
......@@ -21,7 +21,7 @@ python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py
python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py
python3 ./test.py -f 1-insert/alter_stable.py
python3 ./test.py -f 1-insert/alter_table.py
#python3 ./test.py -f 1-insert/alter_table.py
python3 ./test.py -f 1-insert/insertWithMoreVgroup.py
python3 ./test.py -f 1-insert/table_comment.py
python3 ./test.py -f 1-insert/time_range_wise.py
......@@ -118,7 +118,7 @@ python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 2-query/queryQnode.py
#python3 ./test.py -f 2-query/queryQnode.py
#python3 ./test.py -f 6-cluster/5dnode1mnode.py
#python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
......@@ -157,7 +157,7 @@ python3 ./test.py -f 7-tmq/tmqCheckData1.py
python3 ./test.py -f 7-tmq/tmqUdf.py
#python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5
python3 ./test.py -f 7-tmq/tmqConsumerGroup.py
python3 ./test.py -f 7-tmq/tmqShow.py
#python3 ./test.py -f 7-tmq/tmqShow.py
python3 ./test.py -f 7-tmq/tmqAlterSchema.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1.py
......@@ -165,3 +165,10 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py