Commit 12f9a3f3 authored by Shengliang Guan

Merge remote-tracking branch 'origin/3.0' into fix/mnode

......@@ -25,7 +25,7 @@ int32_t init_env() {
return -1;
}
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2");
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
return -1;
......@@ -68,6 +68,14 @@ int32_t init_env() {
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists tu3 using st1 tags(3)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
return 0;
}
......@@ -90,10 +98,9 @@ int32_t create_stream() {
/*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
/*const char* sql = "select sum(k) from tu1 interval(10m)";*/
/*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
pRes = taos_query(
pConn,
"create stream stream1 trigger window_close watermark 10s into outstb as select _wstartts, sum(k) from st1 "
"interval(10s) ");
pRes = taos_query(pConn,
"create stream stream1 trigger at_once into outstb as select _wstartts, sum(k) from st1 "
"partition by tbname interval(10s) ");
if (taos_errno(pRes) != 0) {
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;
......
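For context, here is a minimal standalone sketch (not part of this commit) that issues the updated stream definition from a C client, mirroring the init_env()/create_stream() example above. The connection parameters (localhost, root/taosdata) and the database name abc1 are assumptions taken from the example itself; error handling follows the same pattern.

#include <stdio.h>
#include "taos.h"

/* Sketch: connect and create the stream exactly as the updated example does. */
static int32_t create_stream_sketch(void) {
  TAOS* pConn = taos_connect("localhost", "root", "taosdata", "abc1", 0);
  if (pConn == NULL) {
    printf("failed to connect to server\n");
    return -1;
  }

  TAOS_RES* pRes = taos_query(pConn,
      "create stream stream1 trigger at_once into outstb as "
      "select _wstartts, sum(k) from st1 partition by tbname interval(10s)");
  if (taos_errno(pRes) != 0) {
    printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
    taos_free_result(pRes);
    taos_close(pConn);
    return -1;
  }
  taos_free_result(pRes);
  taos_close(pConn);
  return 0;
}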
......@@ -124,6 +124,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_BLOCK_DIST, // block distribution aggregate function
FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function
FUNCTION_TYPE_TO_COLUMN,
FUNCTION_TYPE_GROUP_KEY,
// distributed splitting functions
FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000,
......
......@@ -458,6 +458,7 @@ typedef struct SSubplan {
int32_t msgType; // message type for subplan, used to denote the send message type to vnode.
int32_t level; // the execution level of current subplan, starting from 0 in a top-down manner.
char dbFName[TSDB_DB_FNAME_LEN];
char user[TSDB_USER_LEN];
SQueryNodeAddr execNode; // for the scan/modify subplan, the optional execution node
SQueryNodeStat execNodeStat; // only for scan subplan
SNodeList* pChildren; // the datasource subplan,from which to fetch the result
......
......@@ -24,18 +24,19 @@ extern "C" {
#include "taos.h"
typedef struct SPlanContext {
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
int8_t triggerType;
int64_t watermark;
char* pMsg;
int32_t msgLen;
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
int8_t triggerType;
int64_t watermark;
char* pMsg;
int32_t msgLen;
const char* pUser;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
......
......@@ -59,7 +59,7 @@ static STscObj* taosConnectImpl(const char* user, const char* auth, const char*
SAppInstInfo* pAppInfo, int connType);
STscObj* taos_connect_internal(const char* ip, const char* user, const char* pass, const char* auth, const char* db,
uint16_t port, int connType) {
uint16_t port, int connType) {
if (taos_init() != TSDB_CODE_SUCCESS) {
return NULL;
}
......@@ -327,8 +327,8 @@ bool qnodeRequired(SRequestObj* pRequest) {
}
SAppInstInfo* pInfo = pRequest->pTscObj->pAppInfo;
bool required = false;
bool required = false;
taosThreadMutexLock(&pInfo->qnodeMutex);
required = (NULL == pInfo->pQnodeList);
taosThreadMutexUnlock(&pInfo->qnodeMutex);
......@@ -376,7 +376,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user};
return qCreateQueryPlan(&cxt, pPlan, pNodeList);
}
......@@ -433,11 +434,11 @@ int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArr
}
for (int32_t j = 0; j < vgNum; ++j) {
SVgroupInfo* pInfo = taosArrayGet(pVg, j);
SVgroupInfo* pInfo = taosArrayGet(pVg, j);
SQueryNodeLoad load = {0};
load.addr.nodeId = pInfo->vgId;
load.addr.epSet = pInfo->epSet;
taosArrayPush(nodeList, &load);
}
}
......@@ -495,17 +496,16 @@ _return:
return TSDB_CODE_SUCCESS;
}
int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SMetaData *pResultMeta) {
int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SMetaData* pResultMeta) {
SArray* pDbVgList = NULL;
SArray* pQnodeList = NULL;
int32_t code = 0;
switch (tsQueryPolicy) {
case QUERY_POLICY_VNODE: {
if (pResultMeta) {
pDbVgList = taosArrayInit(4, POINTER_BYTES);
int32_t dbNum = taosArrayGetSize(pResultMeta->pDbVgroup);
for (int32_t i = 0; i < dbNum; ++i) {
SMetaRes* pRes = taosArrayGet(pResultMeta->pDbVgroup, i);
......@@ -514,9 +514,9 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
}
taosArrayPush(pDbVgList, &pRes->pRes);
}
}
}
code = buildVnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pDbVgList);
break;
}
......@@ -537,7 +537,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
}
taosThreadMutexUnlock(&pInst->qnodeMutex);
}
code = buildQnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pQnodeList);
break;
}
......@@ -548,7 +548,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
taosArrayDestroy(pDbVgList);
taosArrayDestroy(pQnodeList);
return code;
}
......@@ -556,43 +556,43 @@ int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray*
SArray* pDbVgList = NULL;
SArray* pQnodeList = NULL;
int32_t code = 0;
switch (tsQueryPolicy) {
case QUERY_POLICY_VNODE: {
int32_t dbNum = taosArrayGetSize(pRequest->dbList);
if (dbNum > 0) {
SCatalog* pCtg = NULL;
SCatalog* pCtg = NULL;
SAppInstInfo* pInst = pRequest->pTscObj->pAppInfo;
code = catalogGetHandle(pInst->clusterId, &pCtg);
if (code != TSDB_CODE_SUCCESS) {
goto _return;
}
pDbVgList = taosArrayInit(dbNum, POINTER_BYTES);
pDbVgList = taosArrayInit(dbNum, POINTER_BYTES);
SArray* pVgList = NULL;
for (int32_t i = 0; i < dbNum; ++i) {
char* dbFName = taosArrayGet(pRequest->dbList, i);
char* dbFName = taosArrayGet(pRequest->dbList, i);
SRequestConnInfo conn = {.pTrans = pInst->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pInst->mgmtEp)};
.mgmtEps = getEpSet_s(&pInst->mgmtEp)};
code = catalogGetDBVgInfo(pCtg, &conn, dbFName, &pVgList);
if (code) {
goto _return;
}
taosArrayPush(pDbVgList, &pVgList);
}
}
}
code = buildVnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pDbVgList);
break;
}
case QUERY_POLICY_HYBRID:
case QUERY_POLICY_QNODE: {
getQnodeList(pRequest, &pQnodeList);
code = buildQnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pQnodeList);
break;
}
......@@ -605,11 +605,10 @@ _return:
taosArrayDestroy(pDbVgList);
taosArrayDestroy(pQnodeList);
return code;
}
int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) {
tsem_init(&schdRspSem, 0, 0);
......@@ -833,8 +832,8 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) {
}
}
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->requestId);
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);
STscObj* pTscObj = pRequest->pTscObj;
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code)) {
......@@ -880,7 +879,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildSyncExecNodeList(pRequest, &pNodeList, pMnodeList);
code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList);
taosArrayDestroy(pNodeList);
}
......@@ -935,7 +934,7 @@ SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen, bool val
return launchQueryImpl(pRequest, pQuery, false, NULL);
}
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultMeta) {
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta) {
int32_t code = 0;
switch (pQuery->execMode) {
......@@ -956,7 +955,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultM
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
code = qCreateQueryPlan(&cxt, &pRequest->body.pDag, pMnodeList);
......@@ -968,7 +968,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultM
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
SRequestConnInfo conn = {
.pTrans = pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
SSchedulerReq req = {.pConn = &conn,
......@@ -1328,7 +1328,7 @@ TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, cons
if (pObj) {
return pObj->id;
}
return NULL;
}
......@@ -1500,10 +1500,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
return TSDB_CODE_SUCCESS;
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows){
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
char* p = (char*)pResultInfo->pData;
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
int32_t* colLength = (int32_t*)(p + len);
len += sizeof(int32_t) * numOfCols;
......@@ -1513,7 +1513,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
int32_t* offset = (int32_t*)pStart;
int32_t lenTmp = numOfRows * sizeof(int32_t);
int32_t lenTmp = numOfRows * sizeof(int32_t);
len += lenTmp;
pStart += lenTmp;
......@@ -1538,7 +1538,6 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
} else {
ASSERT(0);
}
}
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
int32_t lenTmp = numOfRows * sizeof(int32_t);
......@@ -1562,13 +1561,13 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
break;
}
}
if(!needConvert) return TSDB_CODE_SUCCESS;
if (!needConvert) return TSDB_CODE_SUCCESS;
char* p = (char*)pResultInfo->pData;
char* p = (char*)pResultInfo->pData;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
if(pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
char* p1 = pResultInfo->convertJson;
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
......@@ -1637,7 +1636,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
ASSERT(0);
}
offset1[j]= len;
offset1[j] = len;
memcpy(pStart1 + len, dst, varDataTLen(dst));
len += varDataTLen(dst);
}
......@@ -1655,7 +1654,6 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart += len;
pStart1 += len;
memcpy(pStart1, pStart, colLen);
}
pStart += colLen;
pStart1 += colLen1;
......@@ -1723,7 +1721,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i];
}
if(convertUcs4){
if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
}
......@@ -1840,17 +1838,18 @@ _OVER:
return code;
}
int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str, int32_t acctId, char* db) {
int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str,
int32_t acctId, char* db) {
SName name;
if (len1 <= 0) {
return -1;
}
const char *dbName = db;
const char *tbName = NULL;
int32_t dbLen = 0;
int32_t tbLen = 0;
const char* dbName = db;
const char* tbName = NULL;
int32_t dbLen = 0;
int32_t tbLen = 0;
if (len2 > 0) {
dbName = str + pos1;
dbLen = len1;
......@@ -1861,7 +1860,7 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i
tbName = str + pos1;
tbLen = len1;
}
if (tNameSetDbName(&name, acctId, dbName, dbLen)) {
return -1;
}
......@@ -1881,18 +1880,18 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
}
bool inEscape = false;
bool inEscape = false;
int32_t code = 0;
int32_t vIdx = 0;
int32_t vPos[2];
int32_t vLen[2];
memset(vPos, -1, sizeof(vPos));
memset(vLen, 0, sizeof(vLen));
for (int32_t i = 0; ; ++i) {
for (int32_t i = 0;; ++i) {
if (0 == *(tbList + i)) {
if (vPos[vIdx] >= 0 && vLen[vIdx] <= 0) {
vLen[vIdx] = i - vPos[vIdx];
......@@ -1905,7 +1904,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
break;
}
if ('`' == *(tbList + i)) {
inEscape = !inEscape;
if (!inEscape) {
......@@ -1952,7 +1951,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
if (code) {
goto _return;
}
memset(vPos, -1, sizeof(vPos));
memset(vLen, 0, sizeof(vLen));
vIdx = 0;
......@@ -1966,8 +1965,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
continue;
}
if (('a' <= *(tbList + i) && 'z' >= *(tbList + i)) ||
('A' <= *(tbList + i) && 'Z' >= *(tbList + i)) ||
if (('a' <= *(tbList + i) && 'z' >= *(tbList + i)) || ('A' <= *(tbList + i) && 'Z' >= *(tbList + i)) ||
('0' <= *(tbList + i) && '9' >= *(tbList + i))) {
if (vLen[vIdx] > 0) {
goto _return;
......@@ -1989,32 +1987,31 @@ _return:
taosArrayDestroy(*pReq);
*pReq = NULL;
return terrno;
}
void syncCatalogFn(SMetaData* pResult, void* param, int32_t code) {
SSyncQueryParam *pParam = param;
SSyncQueryParam* pParam = param;
pParam->pRequest->code = code;
tsem_post(&pParam->sem);
}
void syncQueryFn(void *param, void *res, int32_t code) {
SSyncQueryParam *pParam = param;
void syncQueryFn(void* param, void* res, int32_t code) {
SSyncQueryParam* pParam = param;
pParam->pRequest = res;
pParam->pRequest->code = code;
tsem_post(&pParam->sem);
}
void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, bool validateOnly) {
STscObj *pTscObj = acquireTscObj(*(int64_t *)taos);
void taosAsyncQueryImpl(TAOS* taos, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly) {
STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL || sql == NULL || NULL == fp) {
terrno = TSDB_CODE_INVALID_PARA;
if (pTscObj) {
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
} else {
terrno = TSDB_CODE_TSC_DISCONNECTED;
}
......@@ -2031,7 +2028,7 @@ void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void
return;
}
SRequestObj *pRequest = NULL;
SRequestObj* pRequest = NULL;
int32_t code = buildRequest(pTscObj, sql, sqlLen, &pRequest);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
......@@ -2045,45 +2042,41 @@ void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void
doAsyncQuery(pRequest, false);
}
TAOS_RES *taosQueryImpl(TAOS *taos, const char *sql, bool validateOnly) {
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
if (NULL == taos) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
STscObj *pTscObj = acquireTscObj(*(int64_t *)taos);
STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL || sql == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
#if SYNC_ON_TOP_OF_ASYNC
SSyncQueryParam *param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
tsem_init(&param->sem, 0, 0);
taosAsyncQueryImpl(taos, sql, syncQueryFn, param, validateOnly);
tsem_wait(&param->sem);
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
return param->pRequest;
#else
size_t sqlLen = strlen(sql);
if (sqlLen > (size_t)TSDB_MAX_ALLOWED_SQL_LEN) {
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
tscError("sql string exceeds max length:%d", TSDB_MAX_ALLOWED_SQL_LEN);
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
return NULL;
}
TAOS_RES *pRes = execQuery(pTscObj, sql, sqlLen, validateOnly);
TAOS_RES* pRes = execQuery(pTscObj, sql, sqlLen, validateOnly);
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
return pRes;
#endif
}
......@@ -539,21 +539,24 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
const STraceId *trace = &pMsg->info.traceId;
mError("msg:%p, failed to check mnode state since %s, type:%s, numOfMnodes:%d inUse:%d", pMsg, terrstr(),
TMSG_INFO(pMsg->msgType), epSet.numOfEps, epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
mInfo("mnode index:%d, ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
}
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
if (epSet.numOfEps > 0) {
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
mInfo("mnode index:%d, ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
}
int32_t contLen = tSerializeSEpSet(NULL, 0, &epSet);
pMsg->info.rsp = rpcMallocCont(contLen);
if (pMsg->info.rsp != NULL) {
tSerializeSEpSet(pMsg->info.rsp, contLen, &epSet);
pMsg->info.rspLen = contLen;
terrno = TSDB_CODE_RPC_REDIRECT;
} else {
terrno = TSDB_CODE_OUT_OF_MEMORY;
}
} else {
terrno = TSDB_CODE_OUT_OF_MEMORY;
terrno = TSDB_CODE_APP_NOT_READY;
}
return -1;
}
static int32_t mndCheckMsgContent(SRpcMsg *pMsg) {
......
......@@ -243,7 +243,7 @@ int32_t tqRetrieveDataBlock(SSDataBlock* pBlock, STqReadHandle* pHandle, uint64_
if (!tdSTSRowIterNext(&iter, pColData->info.colId, pColData->info.type, &sVal)) {
break;
}
if (colDataAppend(pColData, curRow, sVal.val, sVal.valType == TD_VTYPE_NULL) < 0) {
if (colDataAppend(pColData, curRow, sVal.val, sVal.valType != TD_VTYPE_NORM) < 0) {
goto FAIL;
}
}
......
......@@ -3902,6 +3902,7 @@ typedef struct SMergeIntervalAggOperatorInfo {
SIntervalAggOperatorInfo intervalAggOperatorInfo;
SHashObj* groupIntervalHash;
void* groupIntervalIter;
bool hasGroupId;
uint64_t groupId;
SSDataBlock* prefetchedBlock;
......@@ -3914,6 +3915,23 @@ void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) {
destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput);
}
static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, STimeWindow* win, SSDataBlock* pResultBlock) {
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId);
SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
return TSDB_CODE_SUCCESS;
}
static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, SSDataBlock* pResultBlock,
STimeWindow* newWin) {
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
......@@ -3928,20 +3946,9 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
return 0;
}
if (newWin == NULL || (ascScan && newWin->skey > prevWin->ekey || (!ascScan) && newWin->skey < prevWin->ekey)) {
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &prevWin->skey, TSDB_KEYSIZE, tableGroupId);
SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
if (newWin == NULL) {
taosHashRemove(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId));
} else {
taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow));
}
if ((ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) {
finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow));
}
return 0;
......@@ -4090,12 +4097,17 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
}
pRes->info.groupId = miaInfo->groupId;
} else {
void* p = taosHashIterate(miaInfo->groupIntervalHash, NULL);
if (p != NULL) {
}
if (miaInfo->inputBlocksFinished) {
void* win = taosHashIterate(miaInfo->groupIntervalHash, miaInfo->groupIntervalIter);
if (win != NULL) {
miaInfo->groupIntervalIter = win;
size_t len = 0;
uint64_t* pKey = taosHashGetKey(p, &len);
outputPrevIntervalResult(pOperator, *pKey, pRes, NULL);
uint64_t* pTableGroupId = taosHashGetKey(win, &len);
finalizeWindowResult(pOperator, *pTableGroupId, win, pRes);
pRes->info.groupId = *pTableGroupId;
}
}
......@@ -4118,6 +4130,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
}
miaInfo->groupIntervalHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_NO_LOCK);
miaInfo->groupIntervalIter = NULL;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
......
......@@ -202,6 +202,10 @@ bool blockDistSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t blockDistFunction(SqlFunctionCtx *pCtx);
int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
int32_t groupKeyFunction(SqlFunctionCtx* pCtx);
int32_t groupKeyFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
#ifdef __cplusplus
}
#endif
......
......@@ -1515,6 +1515,16 @@ static bool getBlockDistFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv
return true;
}
static int32_t translateGroupKey(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return TSDB_CODE_SUCCESS;
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
pFunc->node.resType = ((SExprNode*)pPara)->resType;
return TSDB_CODE_SUCCESS;
}
// clang-format off
const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
......@@ -2499,7 +2509,17 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_BLOCK_DIST_INFO,
.classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_SCAN_PC_FUNC,
.translateFunc = translateBlockDistInfoFunc,
}
},
{
.name = "_group_key",
.type = FUNCTION_TYPE_GROUP_KEY,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateGroupKey,
.getEnvFunc = getGroupKeyFuncEnv,
.initFunc = functionSetup,
.processFunc = groupKeyFunction,
.finalizeFunc = groupKeyFinalize,
},
};
// clang-format on
......
......@@ -262,6 +262,12 @@ typedef struct SRateInfo {
int8_t hasResult; // flag to denote has value
} SRateInfo;
typedef struct SGroupKeyInfo{
bool hasResult;
char data[];
} SGroupKeyInfo;
#define SET_VAL(_info, numOfElem, res) \
do { \
if ((numOfElem) <= 0) { \
......@@ -2402,6 +2408,12 @@ bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
return true;
}
bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pNode = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
pEnv->calcMemSize = sizeof(SGroupKeyInfo) + pNode->node.resType.bytes;
return true;
}
static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowIndex) {
if (pTsColInfo == NULL) {
return 0;
......@@ -5349,6 +5361,43 @@ int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pResInfo->numOfRes;
}
int32_t groupKeyFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
int32_t bytes = pInputCol->info.bytes;
int32_t startIndex = pInput->startRowIndex;
if (colDataIsNull_s(pInputCol, startIndex)) {
pInfo->hasResult = false;
goto _group_key_over;
}
pInfo->hasResult = true;
char* data = colDataGetData(pInputCol, startIndex);
memcpy(pInfo->data, data, bytes);
_group_key_over:
SET_VAL(pResInfo, 1, 1);
return TSDB_CODE_SUCCESS;
}
int32_t groupKeyFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, pInfo->data, pInfo->hasResult ? false : true);
return pResInfo->numOfRes;
}
int32_t interpFunction(SqlFunctionCtx* pCtx) {
#if 0
int32_t fillType = (int32_t) pCtx->param[2].i64;
......
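The hunks above add a _group_key aggregate: translateGroupKey makes the result type follow the single argument, getGroupKeyFuncEnv sizes the intermediate buffer from that argument, and groupKeyFunction/groupKeyFinalize copy the first value of each group (or NULL) into the output. A hypothetical client-side usage is sketched below, assuming the function can be referenced from SQL like the other builtins registered in funcMgtBuiltins; the query text, the column k, and the table st1 are illustrative only and are not taken from this commit's tests.

#include <stdio.h>
#include "taos.h"

/* Hypothetical query using the new builtin (the SQL shape is an assumption,
 * not something this commit demonstrates): return one representative key
 * value per group alongside an aggregate. */
static void try_group_key(TAOS* pConn) {
  TAOS_RES* pRes = taos_query(pConn, "select _group_key(k), count(*) from st1 group by k");
  if (taos_errno(pRes) != 0) {
    printf("_group_key query failed, reason:%s\n", taos_errstr(pRes));
  }
  taos_free_result(pRes);
}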
......@@ -2294,6 +2294,7 @@ static const char* jkSubplanType = "SubplanType";
static const char* jkSubplanMsgType = "MsgType";
static const char* jkSubplanLevel = "Level";
static const char* jkSubplanDbFName = "DbFName";
static const char* jkSubplanUser = "User";
static const char* jkSubplanNodeAddr = "NodeAddr";
static const char* jkSubplanRootNode = "RootNode";
static const char* jkSubplanDataSink = "DataSink";
......@@ -2316,6 +2317,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkSubplanDbFName, pNode->dbFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkSubplanUser, pNode->user);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSubplanNodeAddr, queryNodeAddrToJson, &pNode->execNode);
}
......@@ -2352,6 +2356,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSubplanDbFName, pNode->dbFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSubplanUser, pNode->user);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkSubplanNodeAddr, jsonToQueryNodeAddr, &pNode->execNode);
}
......
......@@ -1453,6 +1453,9 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl
pSubplan->id = pLogicSubplan->id;
pSubplan->subplanType = pLogicSubplan->subplanType;
pSubplan->level = pLogicSubplan->level;
if (NULL != pCxt->pPlanCxt->pUser) {
strcpy(pSubplan->user, pCxt->pPlanCxt->pUser);
}
return pSubplan;
}
......
......@@ -85,8 +85,9 @@ class PlannerTestBaseImpl {
public:
PlannerTestBaseImpl() : sqlNo_(0) {}
void useDb(const string& acctId, const string& db) {
caseEnv_.acctId_ = acctId;
void useDb(const string& user, const string& db) {
caseEnv_.acctId_ = 0;
caseEnv_.user_ = user;
caseEnv_.db_ = db;
caseEnv_.nsql_ = g_skipSql;
}
......@@ -193,7 +194,8 @@ class PlannerTestBaseImpl {
private:
struct caseEnv {
string acctId_;
int32_t acctId_;
string user_;
string db_;
int32_t nsql_;
......@@ -295,7 +297,7 @@ class PlannerTestBaseImpl {
transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower);
SParseContext cxt = {0};
cxt.acctId = atoi(caseEnv_.acctId_.c_str());
cxt.acctId = caseEnv_.acctId_;
cxt.db = caseEnv_.db_.c_str();
cxt.pSql = stmtEnv_.sql_.c_str();
cxt.sqlLen = stmtEnv_.sql_.length();
......@@ -319,12 +321,13 @@ class PlannerTestBaseImpl {
void doParseBoundSql(SQuery* pQuery) {
SParseContext cxt = {0};
cxt.acctId = atoi(caseEnv_.acctId_.c_str());
cxt.acctId = caseEnv_.acctId_;
cxt.db = caseEnv_.db_.c_str();
cxt.pSql = stmtEnv_.sql_.c_str();
cxt.sqlLen = stmtEnv_.sql_.length();
cxt.pMsg = stmtEnv_.msgBuf_.data();
cxt.msgLen = stmtEnv_.msgBuf_.max_size();
cxt.pUser = caseEnv_.user_.c_str();
DO_WITH_THROW(qStmtParseQuerySql, &cxt, pQuery);
res_.ast_ = toString(pQuery->pRoot);
......@@ -364,6 +367,7 @@ class PlannerTestBaseImpl {
void setPlanContext(SQuery* pQuery, SPlanContext* pCxt) {
pCxt->queryId = 1;
pCxt->pUser = caseEnv_.user_.c_str();
if (QUERY_NODE_CREATE_TOPIC_STMT == nodeType(pQuery->pRoot)) {
pCxt->pAstRoot = ((SCreateTopicStmt*)pQuery->pRoot)->pQuery;
pCxt->topicQuery = true;
......@@ -403,7 +407,7 @@ PlannerTestBase::PlannerTestBase() : impl_(new PlannerTestBaseImpl()) {}
PlannerTestBase::~PlannerTestBase() {}
void PlannerTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); }
void PlannerTestBase::useDb(const std::string& user, const std::string& db) { impl_->useDb(user, db); }
void PlannerTestBase::run(const std::string& sql) { return impl_->run(sql); }
......
......@@ -30,7 +30,7 @@ class PlannerTestBase : public testing::Test {
PlannerTestBase();
virtual ~PlannerTestBase();
void useDb(const std::string& acctId, const std::string& db);
void useDb(const std::string& user, const std::string& db);
void run(const std::string& sql);
// stmt mode APIs
void prepare(const std::string& sql);
......
......@@ -63,7 +63,6 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
}
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq) {
//
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->dstNodeId) < 0) return -1;
......@@ -84,7 +83,7 @@ int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq) {
if (tDecodeI32(pDecoder, &pReq->srcTaskId) < 0) return -1;
uint64_t len = 0;
if (tDecodeBinaryAlloc(pDecoder, (void**)&pReq->pRetrieve, &len) < 0) return -1;
pReq->retrieveLen = len;
pReq->retrieveLen = (int32_t)len;
tEndDecode(pDecoder);
return 0;
}
......
......@@ -157,7 +157,7 @@ step61:
if $x == 10 then
return -1
endi
sql show mnodes
sql show mnodes -x step61
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
print ===> $data20 $data21 $data22 $data23 $data24 $data25
......
import taos
import sys
import datetime
import inspect
from util.log import *
from util.sql import *
from util.cases import *
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
def prepare_tag_datas(self):
# prepare datas
tdSql.execute(
"create database if not exists testdb keep 3650 duration 1000")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
"insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
"insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def function_for_null_data(self):
function_names = ["abs" , "floor" , "ceil" , "round"]
for function_name in function_names:
scalar_sql_1 = f"select {function_name}(c1)/0 from t1 group by c1 order by c1"
scalar_sql_2 = f"select {function_name}(c1/0) from t1 group by c1 order by c1"
scalar_sql_3 = f"select {function_name}(NULL) from t1 group by c1 order by c1"
tdSql.query(scalar_sql_1)
tdSql.checkRows(10)
tdSql.checkData(0,0,None)
tdSql.checkData(9,0,None)
tdSql.query(scalar_sql_2)
tdSql.checkRows(10)
tdSql.checkData(0,0,None)
tdSql.checkData(9,0,None)
tdSql.query(scalar_sql_3)
tdSql.checkRows(10)
tdSql.checkData(0,0,None)
tdSql.checkData(9,0,None)
function_names = ["sin" ,"cos" ,"tan" ,"asin" ,"acos" ,"atan"]
PI = 3.141592654
# sin
tdSql.query(" select sin(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select sin(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select sin(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select sin({PI/2}) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.0)
tdSql.query(" select sin(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.826879541)
# cos
tdSql.query(" select cos(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select cos(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select cos(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.000000000)
tdSql.query(f" select cos({PI}/2) from t1 group by c1 order by c1")
tdSql.query(" select cos(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.562379076)
# tan
tdSql.query(" select tan(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select tan(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select tan(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select tan({PI}/2) from t1 group by c1 order by c1")
tdSql.query(" select tan(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.470324156)
# atan
tdSql.query(" select atan(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select atan(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select atan(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select atan({PI}/2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.003884822)
tdSql.query(" select atan(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.569796327)
# asin
tdSql.query(" select asin(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select asin(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select asin(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select asin({PI}/2) from t1 group by c1 order by c1")
tdSql.query(" select asin(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
# acos
tdSql.query(" select acos(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select acos(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select acos(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.570796327)
tdSql.query(f" select acos({PI}/2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select acos(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
function_names = ["log" ,"pow"]
# log
tdSql.query(" select log(-10) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select log(NULL ,2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select log(c1)/0 from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(f" select log(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
# pow
tdSql.query(" select pow(c1,10000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select pow(c1,2)/0 from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select pow(NULL,2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(f" select pow(c1/0 ,1 ) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_tag_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.function_for_null_data()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
import taos
import sys
import datetime
import inspect
from util.log import *
from util.sql import *
from util.cases import *
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
def insert_datas_and_check_irate(self ,tbnums , rownums , time_step ):
tdLog.info(" prepare datas for auto check irate function ")
tdSql.execute(" create database test ")
tdSql.execute(" use test ")
tdSql.execute(" create stable stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
for tbnum in range(tbnums):
tbname = "sub_tb_%d"%tbnum
tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))
ts = self.ts
for row in range(rownums):
ts = self.ts + time_step*row
c1 = random.randint(0,1000)
c2 = random.randint(0,100000)
c3 = random.randint(0,125)
c4 = random.randint(0,125)
c5 = random.random()/1.0
c6 = random.random()/1.0
c7 = "'true'"
c8 = "'binary_val'"
c9 = "'nchar_val'"
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
tdSql.execute("use test")
tbnames = ["stb", "sub_tb_1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
if coltype[1] in support_types and coltype[-1] != "TAG" :
irate_sql = "select irate({}) from (select * from {} order by tbname ) ".format(colname, tbname)
origin_sql = "select ts , {} , cast(ts as bigint) from (select ts , {} from {} order by ts desc limit 2 offset 0 ) order by ts".format(colname,colname, tbname)
tdSql.query(irate_sql)
irate_result = tdSql.queryResult
tdSql.query(origin_sql)
origin_result = tdSql.queryResult
irate_value = irate_result[0][0]
if origin_result[1][-1] - origin_result[0][-1] == 0:
comput_irate_value = 0
elif (origin_result[1][1] - origin_result[0][1])<0:
comput_irate_value = origin_result[1][1]*1000/( origin_result[1][-1] - origin_result[0][-1])
else:
comput_irate_value = (origin_result[1][1] - origin_result[0][1])*1000/( origin_result[1][-1] - origin_result[0][-1])
if comput_irate_value ==irate_value:
tdLog.info(" irate work as expected , sql is %s "% irate_sql)
else:
tdLog.exit(" irate work not as expected , sql is %s "% irate_sql)
def prepare_tag_datas(self):
# prepare datas
tdSql.execute(
"create database if not exists testdb keep 3650 duration 1000")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
"insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
"insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def test_errors(self):
tdSql.execute("use testdb")
error_sql_lists = [
"select irate from t1",
"select irate(-+--+c1) from t1",
# "select +-irate(c1) from t1",
# "select ++-irate(c1) from t1",
# "select ++--irate(c1) from t1",
# "select - -irate(c1)*0 from t1",
# "select irate(tbname+1) from t1 ",
"select irate(123--123)==1 from t1",
"select irate(c1) as 'd1' from t1",
"select irate(c1 ,c2 ) from t1",
"select irate(c1 ,NULL) from t1",
"select irate(,) from t1;",
"select irate(irate(c1) ab from t1)",
"select irate(c1) as int from t1",
"select irate from stb1",
# "select irate(-+--+c1) from stb1",
# "select +-irate(c1) from stb1",
# "select ++-irate(c1) from stb1",
# "select ++--irate(c1) from stb1",
# "select - -irate(c1)*0 from stb1",
# "select irate(tbname+1) from stb1 ",
"select irate(123--123)==1 from stb1",
"select irate(c1) as 'd1' from stb1",
"select irate(c1 ,c2 ) from stb1",
"select irate(c1 ,NULL) from stb1",
"select irate(,) from stb1;",
"select irate(abs(c1) ab from stb1)",
"select irate(c1) as int from stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
tdSql.execute("use testdb")
tbnames = ["stb1", "t1", "ct1", "ct2"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
irate_sql = "select irate({}) from {}".format(colname, tbname)
if coltype[1] in support_types:
tdSql.query(irate_sql)
else:
tdSql.error(irate_sql)
def basic_irate_function(self):
# used for empty table , ct3 is empty
tdSql.query("select irate(c1) from ct3")
tdSql.checkRows(0)
tdSql.query("select irate(c2) from ct3")
tdSql.checkRows(0)
# used for regular table
tdSql.query("select irate(c1) from t1")
tdSql.checkData(0, 0, 0.000000386)
# used for sub table
tdSql.query("select irate(abs(c1+c2)) from ct1")
tdSql.checkData(0, 0, 0.000000000)
# mix with common col
tdSql.error("select c1, irate(c1) from ct1")
# mix with common functions
tdSql.error("select irate(c1), abs(c1) from ct4 ")
# agg functions mix with agg functions
tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ")
tdSql.checkData(0, 0, 0.000000000)
tdSql.checkData(1, 0, 0.000000000)
tdSql.checkData(0, 1, 13)
tdSql.checkData(1, 1, 9)
def irate_func_filter(self):
tdSql.execute("use testdb")
tdSql.query(
"select irate(c1+2)/2 from ct4 where c1>5 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 0.000000514)
tdSql.query(
"select irate(c1+c2)/10 from ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 0.000000000)
tdSql.query(
"select irate(c1+c2)/10 from stb1 where c1 = 5 partition by tbname ")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 0.000000000)
def irate_Arithmetic(self):
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_tag_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: irate basic query ============")
self.basic_irate_function()
tdLog.printNoPrefix("==========step5: irate filter query ============")
self.irate_func_filter()
tdLog.printNoPrefix("==========step6: check result of query ============")
self.insert_datas_and_check_irate(self.tb_nums,self.row_nums,self.time_step)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
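The new irate.py above cross-checks irate() against a hand computation from the last two samples of each column: the instantaneous rate is the value delta divided by the timestamp delta (timestamps are in milliseconds, hence the *1000 scaling), and when the newer value is smaller the column is treated as a reset counter and only the newer value is used. Below is a small sketch of that expected-value computation in C, mirroring the check in insert_datas_and_check_irate(); the sample numbers are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Expected irate of the two most recent samples (t in ms, v is the column value),
 * mirroring the check in irate.py: on a counter reset (v2 < v1) only v2 counts. */
static double expected_irate(int64_t t1, double v1, int64_t t2, double v2) {
  if (t2 - t1 == 0) return 0.0;
  double delta = (v2 - v1 < 0) ? v2 : (v2 - v1);
  return delta * 1000.0 / (double)(t2 - t1);
}

int main(void) {
  /* illustrative samples: 10 -> 25 over one second gives 15 per second */
  printf("%f\n", expected_irate(1000, 10.0, 2000, 25.0));
  /* counter reset: 25 -> 3 over one second gives 3 per second */
  printf("%f\n", expected_irate(2000, 25.0, 3000, 3.0));
  return 0;
}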
......@@ -227,6 +227,7 @@ class TDTestCase:
# first add three mnodes;
tdSql.execute("create mnode on dnode 2")
time.sleep(10)
tdSql.execute("create mnode on dnode 3")
# first check status ready
......
......@@ -11,7 +11,9 @@
# -*- coding: utf-8 -*-
from asyncore import loop
from collections import defaultdict
import subprocess
import random
import string
import threading
......@@ -75,7 +77,7 @@ class TMQCom:
return resultList
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0,alias=0):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
if valgrind == 1:
......@@ -88,30 +90,53 @@ class TMQCom:
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
processorName = buildPath + '/build/bin/tmq_sim'
if alias != 0:
processorNameNew = buildPath + '/build/bin/tmq_sim_new'
shellCmd = 'cp %s %s'%(processorName, processorNameNew)
os.system(shellCmd)
processorName = processorNameNew
shellCmd = 'nohup ' + processorName + ' -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
while 1:
os.system(shellCmd)
def stopTmqSimProcess(self, processorName):
psCmd = "ps -ef|grep -w %s|grep -v grep | awk '{print $2}'"%(processorName)
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(0.2)
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
tdLog.debug("%s is stopped by kill -INT" % (processorName))
def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb',rows=1):
loopFlag = 1
while loopFlag:
tdSql.query("select * from %s.notifyinfo"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
break
else:
time.sleep(0.1)
actRows = tdSql.getRows()
if (actRows >= rows):
for i in range(actRows):
if tdSql.getData(i, 1) == 0:
loopFlag = 0
break
time.sleep(0.1)
return
def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
while 1:
def getStartCommitNotifyFromTmqsim(self,cdbName='cdb',rows=2):
loopFlag = 1
while loopFlag:
tdSql.query("select * from %s.notifyinfo"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 2 :
print(tdSql.getData(0, 1), tdSql.getData(1, 1))
if tdSql.getData(1, 1) == 1:
break
actRows = tdSql.getRows()
if (actRows >= rows):
for i in range(actRows):
if tdSql.getData(i, 1) == 1:
loopFlag = 0
break
time.sleep(0.1)
return
......
import taos
import sys
import time
import socket
import os
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def checkFileContent(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 10,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 20,
'showMsg': 1,
'showRow': 1}
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
# queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# create one stb2
paraDict["stbName"] = 'stb2'
paraDict["ctbPrefix"] = 'ctbx'
paraDict["rowsPerTbl"] = 5000
tdLog.info("create stb2")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb2")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# queryString = "select ts, sin(c1), abs(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, sin(c1), abs(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = "%s,%s"%(topicNameList[0],topicNameList[1])
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:3000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("start consume processor 2")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],'cdb',0,1)
tdLog.info("async insert data")
pThread = tmqCom.asyncInsertData(paraDict)
tdLog.info("wait consumer commit notify")
tmqCom.getStartCommitNotifyFromTmqsim(rows=4)
tdLog.info("pkill one consume processor")
tmqCom.stopTmqSimProcess('tmq_sim_new')
pThread.join()
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actTotalRows = 0
for i in range(len(resultList)):
actTotalRows += resultList[i]
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
expectTotalRows = 0
for i in range(len(expectRowsList)):
expectTotalRows += expectRowsList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(actTotalRows, expectTotalRows))
if expectTotalRows <= resultList[0]:
tdLog.info("act consume rows: %d should >= expect consume rows: %d"%(actTotalRows, expectTotalRows))
tdLog.exit("0 tmq consume rows error!")
# time.sleep(10)
# for i in range(len(topicNameList)):
# tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
tdSql.prepare()
self.tmqCase1()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
......@@ -109,6 +109,9 @@ python3 ./test.py -f 2-query/distribute_agg_apercentile.py
python3 ./test.py -f 2-query/distribute_agg_avg.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py
......@@ -135,3 +138,4 @@ python3 ./test.py -f 7-tmq/stbFilter.py
python3 ./test.py -f 7-tmq/tmqCheckData.py
python3 ./test.py -f 7-tmq/tmqUdf.py
#python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5
python3 ./test.py -f 7-tmq/tmqConsumerGroup.py
......@@ -93,8 +93,6 @@ static SConfInfo g_stConfInfo;
TdFilePtr g_fp = NULL;
static int running = 1;
int8_t useSnapshot = 0;
// char* g_pRowValue = NULL;
// TdFilePtr g_fp = NULL;
......@@ -126,11 +124,23 @@ char* getCurrentTimeString(char* timeString) {
return timeString;
}
static void tmqStop(int signum, void *info, void *ctx) {
running = 0;
char tmpString[128];
taosFprintfFile(g_fp, "%s tmqStop() receive stop signal[%d]\n", getCurrentTimeString(tmpString), signum);
}
static void tmqSetSignalHandle() {
taosSetSignal(SIGINT, tmqStop);
}
void initLogFile() {
char filename[256];
char tmpString[128];
sprintf(filename, "%s/../log/tmqlog_%s.txt", configDir, getCurrentTimeString(tmpString));
pid_t process_id = getpid();
sprintf(filename, "%s/../log/tmqlog-%d-%s.txt", configDir, process_id, getCurrentTimeString(tmpString));
#ifdef WINDOWS
for (int i = 2; i < sizeof(filename); i++) {
if (filename[i] == ':') filename[i] = '-';
......@@ -205,7 +215,7 @@ void parseArgument(int32_t argc, char* argv[]) {
} else if (strcmp(argv[i], "-y") == 0) {
g_stConfInfo.consumeDelay = atol(argv[++i]);
} else if (strcmp(argv[i], "-e") == 0) {
useSnapshot = (int8_t)atol(argv[++i]);
g_stConfInfo.useSnapshot = atol(argv[++i]);
} else {
pError("%s unknow para: %s %s", GREEN, argv[++i], NC);
exit(-1);
......@@ -519,7 +529,9 @@ static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
g_once_commit_flag = 1;
notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT);
}
taosFprintfFile(g_fp, "tmq_commit_cb_print() be called\n");
char tmpString[128];
taosFprintfFile(g_fp, "%s tmq_commit_cb_print() be called\n", getCurrentTimeString(tmpString));
}
void build_consumer(SThreadInfo* pInfo) {
......@@ -552,7 +564,7 @@ void build_consumer(SThreadInfo* pInfo) {
// tmq_conf_set(conf, "auto.offset.reset", "earliest");
// tmq_conf_set(conf, "auto.offset.reset", "latest");
//
if (useSnapshot) {
if (g_stConfInfo.useSnapshot) {
tmq_conf_set(conf, "experiment.use.snapshot", "true");
}
......@@ -651,6 +663,10 @@ void loop_consume(SThreadInfo* pInfo) {
}
}
if (0 == running) {
taosFprintfFile(g_fp, "receive stop signal and not continue consume\n");
}
pInfo->consumeMsgCnt = totalMsgs;
pInfo->consumeRowCnt = totalRows;
......@@ -680,7 +696,7 @@ void* consumeThreadFunc(void* param) {
int32_t err = tmq_subscribe(pInfo->tmq, pInfo->topicList);
if (err != 0) {
pError("tmq_subscribe() fail, reason: %s\n", tmq_err2str(err));
taosFprintfFile(g_fp, "tmq_subscribe()! reason: %s\n", tmq_err2str(err));
taosFprintfFile(g_fp, "tmq_subscribe() fail! reason: %s\n", tmq_err2str(err));
assert(0);
return NULL;
}
......@@ -829,6 +845,8 @@ int main(int32_t argc, char* argv[]) {
getConsumeInfo();
saveConfigToLogFile();
tmqSetSignalHandle();
TdThreadAttr thattr;
taosThreadAttrInit(&thattr);
taosThreadAttrSetDetachState(&thattr, PTHREAD_CREATE_JOINABLE);
......