Commit b857d390 authored by Xiaoyu Wang

merge 3.0

......@@ -43,10 +43,8 @@ def pre_test(){
sh '''
cd ${WK}
git reset --hard
git fetch || git fetch
cd ${WKC}
git reset --hard
git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
......@@ -82,6 +80,7 @@ def pre_test(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
sh '''
cd ${WKC}
git remote prune origin
git pull >/dev/null
git log -5
echo "`date "+%Y%m%d-%H%M%S"` ${JOB_NAME}:${BRANCH_NAME}:${BUILD_ID}:${CHANGE_TARGET}" >>${WKDIR}/jenkins.log
......@@ -107,6 +106,7 @@ def pre_test(){
git log -5
echo "tdinternal log merged: `git log -5`" >>${WKDIR}/jenkins.log
cd ${WKC}
git remote prune origin
git pull >/dev/null
git log -5
echo "community log: `git log -5`" >>${WKDIR}/jenkins.log
......@@ -137,53 +137,51 @@ def pre_test_win(){
set
date /t
time /t
rd /s /Q C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug || exit 0
rd /s /Q %WIN_INTERNAL_ROOT%\\debug || exit 0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git reset --hard
git fetch || git fetch
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git reset --hard
git fetch || git fetch
'''
script {
if (env.CHANGE_TARGET == 'master') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout master
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout master
'''
} else if(env.CHANGE_TARGET == '2.0') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout 2.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout 2.0
'''
} else if(env.CHANGE_TARGET == '3.0') {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout 3.0
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout 3.0
'''
} else {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout develop
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout develop
'''
}
......@@ -191,36 +189,38 @@ def pre_test_win(){
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git checkout -qf FETCH_HEAD
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git fetch origin +refs/pull/%CHANGE_ID%/merge
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git checkout -qf FETCH_HEAD
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
git pull
'''
} else {
......@@ -230,27 +230,27 @@ def pre_test_win(){
}
}
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git branch
git log -5
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community
cd %WIN_COMMUNITY_ROOT%
git submodule update --init --recursive
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
git branch
git reset --hard
git pull
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
git log -5
'''
}
......@@ -258,7 +258,7 @@ def pre_test_build_win() {
bat '''
echo "building ..."
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal
cd %WIN_INTERNAL_ROOT%
mkdir debug
cd debug
time /t
......@@ -273,9 +273,9 @@ def pre_test_build_win() {
time /t
'''
bat '''
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
python -m pip install .
xcopy /e/y/i/f C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
}
......@@ -283,7 +283,7 @@ def run_win_ctest() {
bat '''
echo "windows ctest ..."
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug
cd %WIN_INTERNAL_ROOT%\\debug
ctest -j 1 || exit 7
time /t
'''
......@@ -292,12 +292,12 @@ def run_win_test() {
echo "LINUX NODE: ${linux_node_ip} - ${linux_node_pass}"
bat '''
echo "windows test ..."
cd C:\\workspace\\%EXECUTOR_NUMBER%\\taos-connector-python
cd %WIN_CONNECTOR_ROOT%
python -m pip install .
xcopy /e/y/i/f C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
cd C:\\workspace\\%EXECUTOR_NUMBER%\\TDinternal\\community\\tests\\system-test
cd %WIN_SYSTEM_TEST_ROOT%
echo "node: ''' + linux_node_ip + ''':''' + linux_node_pass + '''"
echo "testing ..."
test-all.bat "{\\\"host\\\":\\\"''' + linux_node_ip + '''\\\",\\\"port\\\":22,\\\"user\\\":\\\"root\\\",\\\"password\\\":\\\"''' + linux_node_pass + '''\\\",\\\"path\\\":\\\"/var/lib/jenkins/workspace/TDinternal\\\"}"
......@@ -319,6 +319,12 @@ pipeline {
parallel {
stage('windows test') {
agent{label " windows10_01 || windows10_02 || windows10_03 || windows10_04 "}
environment{
WIN_INTERNAL_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal"
WIN_COMMUNITY_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community"
WIN_SYSTEM_TEST_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\TDinternal\\community\\tests\\system-test"
WIN_CONNECTOR_ROOT="C:\\workspace\\${env.EXECUTOR_NUMBER}\\taos-connector-python"
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 55, unit: 'MINUTES'){
......@@ -357,7 +363,7 @@ pipeline {
echo "${linux_node_ip}:${linux_node_pass}"
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 40, unit: 'MINUTES'){
timeout(time: 120, unit: 'MINUTES'){
pre_test()
script {
sh '''
......@@ -368,11 +374,36 @@ pipeline {
rm -f /tmp/cases.task
./collect_cases.sh -e
'''
def extra_param = ""
def log_server_file = "/home/log_server.json"
def timeout_cmd = ""
if (fileExists(log_server_file)) {
def log_server_enabled = sh (
script: 'jq .enabled ' + log_server_file,
returnStdout: true
).trim()
def timeout_param = sh (
script: 'jq .timeout ' + log_server_file,
returnStdout: true
).trim()
if (timeout_param != "null" && timeout_param != "0") {
timeout_cmd = "timeout " + timeout_param
}
if (log_server_enabled == "1") {
def log_server = sh (
script: 'jq .server ' + log_server_file + ' | sed "s/\\\"//g"',
returnStdout: true
).trim()
if (log_server != "null" && log_server != "") {
extra_param = "-w " + log_server
}
}
}
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480
''' + timeout_cmd + ''' time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME}_${BUILD_ID} -l ${WKDIR}/log -o 480 ''' + extra_param + '''
'''
}
}
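For reference, the jq lookups above imply the expected shape of /home/log_server.json: an "enabled" flag (the log server is forwarded to run.sh via -w only when it equals 1), a "timeout" in seconds (prefixed to the run.sh command through the timeout utility when non-zero), and a "server" address. A purely illustrative sketch of such a file — field names taken from the script, values hypothetical:

    # Hypothetical /home/log_server.json matching the jq lookups above; values are examples only.
    cat > /home/log_server.json <<'EOF'
    {
      "enabled": 1,
      "timeout": 3600,
      "server": "192.168.0.100"
    }
    EOF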
......
......@@ -510,7 +510,8 @@ typedef struct {
int8_t superUser;
int8_t connType;
SEpSet epSet;
char sVersion[128];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
} SConnectRsp;
int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp);
......@@ -836,6 +837,20 @@ typedef struct {
int32_t tSerializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq);
int32_t tDeserializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq);
typedef struct {
int32_t useless; // useless
} SServerVerReq;
int32_t tSerializeSServerVerReq(void* buf, int32_t bufLen, SServerVerReq* pReq);
int32_t tDeserializeSServerVerReq(void* buf, int32_t bufLen, SServerVerReq* pReq);
typedef struct {
char ver[TSDB_VERSION_LEN];
} SServerVerRsp;
int32_t tSerializeSServerVerRsp(void* buf, int32_t bufLen, SServerVerRsp* pRsp);
int32_t tDeserializeSServerVerRsp(void* buf, int32_t bufLen, SServerVerRsp* pRsp);
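These helpers follow the same two-pass encode pattern used elsewhere in this commit (a NULL/0 call to size the buffer, then a second call to fill it). A usage sketch only — not part of the patch — assuming the mnode-side rpcMallocCont allocator and the global version string:

    // Sketch: reply with the server version (mirrors the mnode handler added later in this commit).
    SServerVerRsp rsp = {0};
    strcpy(rsp.ver, version);                                  // 'version' is the build's version string
    int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp);  // pass 1: compute the encoded length
    void *pRspBuf = rpcMallocCont(contLen);                    // allocate the RPC payload
    tSerializeSServerVerRsp(pRspBuf, contLen, &rsp);           // pass 2: encode into the buffer

    // Receiving side: decode the payload back into the struct.
    SServerVerRsp out = {0};
    tDeserializeSServerVerRsp(pRspBuf, contLen, &out);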
typedef struct SQueryNodeAddr {
int32_t nodeId; // vgId or qnodeId
......@@ -1229,6 +1244,21 @@ typedef struct {
int32_t tSerializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq* pReq);
int32_t tDeserializeSShowVariablesReq(void* buf, int32_t bufLen, SShowVariablesReq* pReq);
typedef struct {
char name[TSDB_CONFIG_OPTION_LEN + 1];
char value[TSDB_CONFIG_VALUE_LEN + 1];
} SVariablesInfo;
typedef struct {
SArray *variables; //SArray<SVariablesInfo>
} SShowVariablesRsp;
int32_t tSerializeSShowVariablesRsp(void* buf, int32_t bufLen, SShowVariablesRsp* pReq);
int32_t tDeserializeSShowVariablesRsp(void* buf, int32_t bufLen, SShowVariablesRsp* pReq);
void tFreeSShowVariablesRsp(SShowVariablesRsp* pRsp);
/*
* sql: show tables like '%a_%'
* payload is the query condition, e.g., '%a_%'
......
......@@ -163,6 +163,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_REDISTRIBUTE_VGROUP, "redistribute-vgroup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
......
......@@ -71,6 +71,7 @@ typedef struct SCatalogReq {
SArray* pTableCfg; // element is SNAME
bool qNodeRequired; // valid qnode
bool dNodeRequired; // valid dnode
bool svrVerRequired;
bool forceUpdate;
} SCatalogReq;
......@@ -80,18 +81,19 @@ typedef struct SMetaRes {
} SMetaRes;
typedef struct SMetaData {
SArray* pDbVgroup; // pRes = SArray<SVgroupInfo>*
SArray* pDbCfg; // pRes = SDbCfgInfo*
SArray* pDbInfo; // pRes = SDbInfo*
SArray* pTableMeta; // pRes = STableMeta*
SArray* pTableHash; // pRes = SVgroupInfo*
SArray* pTableIndex; // pRes = SArray<STableIndexInfo>*
SArray* pUdfList; // pRes = SFuncInfo*
SArray* pIndex; // pRes = SIndexInfo*
SArray* pUser; // pRes = bool*
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
SArray* pTableCfg; // pRes = STableCfg*
SArray* pDnodeList; // pRes = SArray<SEpSet>*
SArray* pDbVgroup; // pRes = SArray<SVgroupInfo>*
SArray* pDbCfg; // pRes = SDbCfgInfo*
SArray* pDbInfo; // pRes = SDbInfo*
SArray* pTableMeta; // pRes = STableMeta*
SArray* pTableHash; // pRes = SVgroupInfo*
SArray* pTableIndex; // pRes = SArray<STableIndexInfo>*
SArray* pUdfList; // pRes = SFuncInfo*
SArray* pIndex; // pRes = SIndexInfo*
SArray* pUser; // pRes = bool*
SArray* pQnodeList; // pRes = SArray<SQueryNodeLoad>*
SArray* pTableCfg; // pRes = STableCfg*
SArray* pDnodeList; // pRes = SArray<SEpSet>*
SMetaRes* pSvrVer; // pRes = char*
} SMetaData;
typedef struct SCatalogCfg {
......@@ -268,7 +270,7 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCatalog, SRequestConnInfo* pConn, c
*/
int32_t catalogGetAllMeta(SCatalog* pCatalog, SRequestConnInfo* pConn, const SCatalogReq* pReq, SMetaData* pRsp);
int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId);
int32_t catalogGetQnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray* pQnodeList);
......@@ -298,6 +300,8 @@ int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
int32_t catalogUpdateVgEpSet(SCatalog* pCtg, const char* dbFName, int32_t vgId, SEpSet *epSet);
int32_t catalogGetServerVersion(SCatalog* pCtg, SRequestConnInfo *pConn, char** pVersion);
int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t reqId, bool forceUpdate);
int32_t catalogClearCache(void);
......
......@@ -124,6 +124,7 @@ typedef enum EFunctionType {
FUNCTION_TYPE_BLOCK_DIST, // block distribution aggregate function
FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function
FUNCTION_TYPE_TO_COLUMN,
FUNCTION_TYPE_GROUP_KEY,
// distributed splitting functions
FUNCTION_TYPE_APERCENTILE_PARTIAL = 4000,
......
......@@ -459,6 +459,7 @@ typedef struct SSubplan {
int32_t msgType; // message type for subplan, used to denote the send message type to vnode.
int32_t level; // the execution level of current subplan, starting from 0 in a top-down manner.
char dbFName[TSDB_DB_FNAME_LEN];
char user[TSDB_USER_LEN];
SQueryNodeAddr execNode; // for the scan/modify subplan, the optional execution node
SQueryNodeStat execNodeStat; // only for scan subplan
SNodeList* pChildren; // the datasource subplan,from which to fetch the result
......
......@@ -51,6 +51,8 @@ typedef struct SParseContext {
bool isSuperUser;
bool async;
int8_t schemalessType;
const char* svrVer;
bool nodeOffline;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
......
......@@ -24,18 +24,19 @@ extern "C" {
#include "taos.h"
typedef struct SPlanContext {
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
int8_t triggerType;
int64_t watermark;
char* pMsg;
int32_t msgLen;
uint64_t queryId;
int32_t acctId;
SEpSet mgmtEpSet;
SNode* pAstRoot;
bool topicQuery;
bool streamQuery;
bool rSmaQuery;
bool showRewrite;
int8_t triggerType;
int64_t watermark;
char* pMsg;
int32_t msgLen;
const char* pUser;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
......
......@@ -200,6 +200,7 @@ const char* syncGetMyRoleStr(int64_t rid);
SyncTerm syncGetMyTerm(int64_t rid);
SyncGroupId syncGetVgId(int64_t rid);
void syncGetEpSet(int64_t rid, SEpSet* pEpSet);
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet);
int32_t syncPropose(int64_t rid, SRpcMsg* pMsg, bool isWeak);
bool syncEnvIsStart();
const char* syncStr(ESyncState state);
......
......@@ -54,6 +54,11 @@ enum {
RES_TYPE__TMQ_META,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
......@@ -104,6 +109,8 @@ typedef struct SHeartBeatInfo {
struct SAppInstInfo {
int64_t numOfConns;
SCorEpSet mgmtEp;
int32_t totalDnodes;
int32_t onlineDnodes;
TdThreadMutex qnodeMutex;
SArray* pQnodeList;
SAppClusterSummary summary;
......@@ -127,7 +134,8 @@ typedef struct STscObj {
char user[TSDB_USER_LEN];
char pass[TSDB_PASSWORD_LEN];
char db[TSDB_DB_FNAME_LEN];
char ver[128];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
int8_t connType;
int32_t acctId;
uint32_t connId;
......
......@@ -161,6 +161,9 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
updateEpSet_s(&pTscObj->pAppInfo->mgmtEp, &pRsp->query->epSet);
}
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
pTscObj->connId = pRsp->query->connId;
if (pRsp->query->killRid) {
......
......@@ -59,7 +59,7 @@ static STscObj* taosConnectImpl(const char* user, const char* auth, const char*
SAppInstInfo* pAppInfo, int connType);
STscObj* taos_connect_internal(const char* ip, const char* user, const char* pass, const char* auth, const char* db,
uint16_t port, int connType) {
uint16_t port, int connType) {
if (taos_init() != TSDB_CODE_SUCCESS) {
return NULL;
}
......@@ -178,7 +178,9 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.pStmtCb = pStmtCb,
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER))};
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
cxt.mgmtEpSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
int32_t code = catalogGetHandle(pTscObj->pAppInfo->clusterId, &cxt.pCatalog);
......@@ -325,8 +327,8 @@ bool qnodeRequired(SRequestObj* pRequest) {
}
SAppInstInfo* pInfo = pRequest->pTscObj->pAppInfo;
bool required = false;
bool required = false;
taosThreadMutexLock(&pInfo->qnodeMutex);
required = (NULL == pInfo->pQnodeList);
taosThreadMutexUnlock(&pInfo->qnodeMutex);
......@@ -374,7 +376,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user};
return qCreateQueryPlan(&cxt, pPlan, pNodeList);
}
......@@ -431,11 +434,11 @@ int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArr
}
for (int32_t j = 0; j < vgNum; ++j) {
SVgroupInfo* pInfo = taosArrayGet(pVg, j);
SVgroupInfo* pInfo = taosArrayGet(pVg, j);
SQueryNodeLoad load = {0};
load.addr.nodeId = pInfo->vgId;
load.addr.epSet = pInfo->epSet;
taosArrayPush(nodeList, &load);
}
}
......@@ -493,17 +496,16 @@ _return:
return TSDB_CODE_SUCCESS;
}
int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SMetaData *pResultMeta) {
int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SMetaData* pResultMeta) {
SArray* pDbVgList = NULL;
SArray* pQnodeList = NULL;
int32_t code = 0;
switch (tsQueryPolicy) {
case QUERY_POLICY_VNODE: {
if (pResultMeta) {
pDbVgList = taosArrayInit(4, POINTER_BYTES);
int32_t dbNum = taosArrayGetSize(pResultMeta->pDbVgroup);
for (int32_t i = 0; i < dbNum; ++i) {
SMetaRes* pRes = taosArrayGet(pResultMeta->pDbVgroup, i);
......@@ -512,9 +514,9 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
}
taosArrayPush(pDbVgList, &pRes->pRes);
}
}
}
code = buildVnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pDbVgList);
break;
}
......@@ -535,7 +537,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
}
taosThreadMutexUnlock(&pInst->qnodeMutex);
}
code = buildQnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pQnodeList);
break;
}
......@@ -546,7 +548,7 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
taosArrayDestroy(pDbVgList);
taosArrayDestroy(pQnodeList);
return code;
}
......@@ -554,43 +556,43 @@ int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray*
SArray* pDbVgList = NULL;
SArray* pQnodeList = NULL;
int32_t code = 0;
switch (tsQueryPolicy) {
case QUERY_POLICY_VNODE: {
int32_t dbNum = taosArrayGetSize(pRequest->dbList);
if (dbNum > 0) {
SCatalog* pCtg = NULL;
SCatalog* pCtg = NULL;
SAppInstInfo* pInst = pRequest->pTscObj->pAppInfo;
code = catalogGetHandle(pInst->clusterId, &pCtg);
if (code != TSDB_CODE_SUCCESS) {
goto _return;
}
pDbVgList = taosArrayInit(dbNum, POINTER_BYTES);
pDbVgList = taosArrayInit(dbNum, POINTER_BYTES);
SArray* pVgList = NULL;
for (int32_t i = 0; i < dbNum; ++i) {
char* dbFName = taosArrayGet(pRequest->dbList, i);
char* dbFName = taosArrayGet(pRequest->dbList, i);
SRequestConnInfo conn = {.pTrans = pInst->pTransporter,
.requestId = pRequest->requestId,
.requestObjRefId = pRequest->self,
.mgmtEps = getEpSet_s(&pInst->mgmtEp)};
.mgmtEps = getEpSet_s(&pInst->mgmtEp)};
code = catalogGetDBVgInfo(pCtg, &conn, dbFName, &pVgList);
if (code) {
goto _return;
}
taosArrayPush(pDbVgList, &pVgList);
}
}
}
code = buildVnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pDbVgList);
break;
}
case QUERY_POLICY_HYBRID:
case QUERY_POLICY_QNODE: {
getQnodeList(pRequest, &pQnodeList);
code = buildQnodePolicyNodeList(pRequest, pNodeList, pMnodeList, pQnodeList);
break;
}
......@@ -603,11 +605,10 @@ _return:
taosArrayDestroy(pDbVgList);
taosArrayDestroy(pQnodeList);
return code;
}
int32_t scheduleAsyncQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList) {
tsem_init(&schdRspSem, 0, 0);
......@@ -831,8 +832,8 @@ void schedulerExecCb(SQueryResult* pResult, void* param, int32_t code) {
}
}
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->requestId);
tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
tstrerror(code), pRequest->requestId);
STscObj* pTscObj = pRequest->pTscObj;
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code)) {
......@@ -878,7 +879,7 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildSyncExecNodeList(pRequest, &pNodeList, pMnodeList);
code = scheduleQuery(pRequest, pRequest->body.pDag, pNodeList);
taosArrayDestroy(pNodeList);
}
......@@ -933,7 +934,7 @@ SRequestObj* launchQuery(STscObj* pTscObj, const char* sql, int sqlLen, bool val
return launchQueryImpl(pRequest, pQuery, false, NULL);
}
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultMeta) {
void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultMeta) {
int32_t code = 0;
switch (pQuery->execMode) {
......@@ -954,7 +955,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultM
.pAstRoot = pQuery->pRoot,
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE};
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
code = qCreateQueryPlan(&cxt, &pRequest->body.pDag, pMnodeList);
......@@ -966,7 +968,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData *pResultM
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
SRequestConnInfo conn = {
.pTrans = pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
SSchedulerReq req = {.pConn = &conn,
......@@ -1326,7 +1328,7 @@ TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, cons
if (pObj) {
return pObj->id;
}
return NULL;
}
......@@ -1498,10 +1500,10 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
return TSDB_CODE_SUCCESS;
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows){
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
char* p = (char*)pResultInfo->pData;
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
int32_t* colLength = (int32_t*)(p + len);
len += sizeof(int32_t) * numOfCols;
......@@ -1511,7 +1513,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) {
int32_t* offset = (int32_t*)pStart;
int32_t lenTmp = numOfRows * sizeof(int32_t);
int32_t lenTmp = numOfRows * sizeof(int32_t);
len += lenTmp;
pStart += lenTmp;
......@@ -1536,7 +1538,6 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
} else {
ASSERT(0);
}
}
} else if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) {
int32_t lenTmp = numOfRows * sizeof(int32_t);
......@@ -1560,13 +1561,13 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
break;
}
}
if(!needConvert) return TSDB_CODE_SUCCESS;
if (!needConvert) return TSDB_CODE_SUCCESS;
char* p = (char*)pResultInfo->pData;
char* p = (char*)pResultInfo->pData;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
if(pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
char* p1 = pResultInfo->convertJson;
int32_t len = sizeof(int32_t) + sizeof(uint64_t) + numOfCols * (sizeof(int16_t) + sizeof(int32_t));
......@@ -1635,7 +1636,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
ASSERT(0);
}
offset1[j]= len;
offset1[j] = len;
memcpy(pStart1 + len, dst, varDataTLen(dst));
len += varDataTLen(dst);
}
......@@ -1653,7 +1654,6 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
pStart += len;
pStart1 += len;
memcpy(pStart1, pStart, colLen);
}
pStart += colLen;
pStart1 += colLen1;
......@@ -1721,7 +1721,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32
pStart += colLength[i];
}
if(convertUcs4){
if (convertUcs4) {
code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength);
}
......@@ -1838,17 +1838,18 @@ _OVER:
return code;
}
int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str, int32_t acctId, char* db) {
int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str,
int32_t acctId, char* db) {
SName name;
if (len1 <= 0) {
return -1;
}
const char *dbName = db;
const char *tbName = NULL;
int32_t dbLen = 0;
int32_t tbLen = 0;
const char* dbName = db;
const char* tbName = NULL;
int32_t dbLen = 0;
int32_t tbLen = 0;
if (len2 > 0) {
dbName = str + pos1;
dbLen = len1;
......@@ -1859,7 +1860,7 @@ int32_t appendTbToReq(SArray* pList, int32_t pos1, int32_t len1, int32_t pos2, i
tbName = str + pos1;
tbLen = len1;
}
if (tNameSetDbName(&name, acctId, dbName, dbLen)) {
return -1;
}
......@@ -1879,18 +1880,18 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
terrno = TSDB_CODE_OUT_OF_MEMORY;
return terrno;
}
bool inEscape = false;
bool inEscape = false;
int32_t code = 0;
int32_t vIdx = 0;
int32_t vPos[2];
int32_t vLen[2];
memset(vPos, -1, sizeof(vPos));
memset(vLen, 0, sizeof(vLen));
for (int32_t i = 0; ; ++i) {
for (int32_t i = 0;; ++i) {
if (0 == *(tbList + i)) {
if (vPos[vIdx] >= 0 && vLen[vIdx] <= 0) {
vLen[vIdx] = i - vPos[vIdx];
......@@ -1903,7 +1904,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
break;
}
if ('`' == *(tbList + i)) {
inEscape = !inEscape;
if (!inEscape) {
......@@ -1950,7 +1951,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
if (code) {
goto _return;
}
memset(vPos, -1, sizeof(vPos));
memset(vLen, 0, sizeof(vLen));
vIdx = 0;
......@@ -1964,8 +1965,7 @@ int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName,
continue;
}
if (('a' <= *(tbList + i) && 'z' >= *(tbList + i)) ||
('A' <= *(tbList + i) && 'Z' >= *(tbList + i)) ||
if (('a' <= *(tbList + i) && 'z' >= *(tbList + i)) || ('A' <= *(tbList + i) && 'Z' >= *(tbList + i)) ||
('0' <= *(tbList + i) && '9' >= *(tbList + i))) {
if (vLen[vIdx] > 0) {
goto _return;
......@@ -1987,32 +1987,31 @@ _return:
taosArrayDestroy(*pReq);
*pReq = NULL;
return terrno;
}
void syncCatalogFn(SMetaData* pResult, void* param, int32_t code) {
SSyncQueryParam *pParam = param;
SSyncQueryParam* pParam = param;
pParam->pRequest->code = code;
tsem_post(&pParam->sem);
}
void syncQueryFn(void *param, void *res, int32_t code) {
SSyncQueryParam *pParam = param;
void syncQueryFn(void* param, void* res, int32_t code) {
SSyncQueryParam* pParam = param;
pParam->pRequest = res;
pParam->pRequest->code = code;
tsem_post(&pParam->sem);
}
void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void *param, bool validateOnly) {
STscObj *pTscObj = acquireTscObj(*(int64_t *)taos);
void taosAsyncQueryImpl(TAOS* taos, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly) {
STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL || sql == NULL || NULL == fp) {
terrno = TSDB_CODE_INVALID_PARA;
if (pTscObj) {
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
} else {
terrno = TSDB_CODE_TSC_DISCONNECTED;
}
......@@ -2029,7 +2028,7 @@ void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void
return;
}
SRequestObj *pRequest = NULL;
SRequestObj* pRequest = NULL;
int32_t code = buildRequest(pTscObj, sql, sqlLen, &pRequest);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
......@@ -2043,45 +2042,41 @@ void taosAsyncQueryImpl(TAOS *taos, const char *sql, __taos_async_fn_t fp, void
doAsyncQuery(pRequest, false);
}
TAOS_RES *taosQueryImpl(TAOS *taos, const char *sql, bool validateOnly) {
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly) {
if (NULL == taos) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
STscObj *pTscObj = acquireTscObj(*(int64_t *)taos);
STscObj* pTscObj = acquireTscObj(*(int64_t*)taos);
if (pTscObj == NULL || sql == NULL) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
#if SYNC_ON_TOP_OF_ASYNC
SSyncQueryParam *param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
SSyncQueryParam* param = taosMemoryCalloc(1, sizeof(SSyncQueryParam));
tsem_init(&param->sem, 0, 0);
taosAsyncQueryImpl(taos, sql, syncQueryFn, param, validateOnly);
tsem_wait(&param->sem);
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
return param->pRequest;
#else
size_t sqlLen = strlen(sql);
if (sqlLen > (size_t)TSDB_MAX_ALLOWED_SQL_LEN) {
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
tscError("sql string exceeds max length:%d", TSDB_MAX_ALLOWED_SQL_LEN);
terrno = TSDB_CODE_TSC_EXCEED_SQL_LIMIT;
return NULL;
}
TAOS_RES *pRes = execQuery(pTscObj, sql, sqlLen, validateOnly);
TAOS_RES* pRes = execQuery(pTscObj, sql, sqlLen, validateOnly);
releaseTscObj(*(int64_t *)taos);
releaseTscObj(*(int64_t*)taos);
return pRes;
#endif
}
......@@ -623,7 +623,7 @@ const char *taos_get_server_info(TAOS *taos) {
releaseTscObj(*(int64_t *)taos);
return pTscObj->ver;
return pTscObj->sDetailVer;
}
typedef struct SqlParseWrapper {
......@@ -766,7 +766,7 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
.requestObjRefId = pCxt->requestRid,
.mgmtEps = pCxt->mgmtEpSet};
code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, pRequest->requestId, &catalogReq, retrieveMetaCallback, pWrapper,
code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper,
&pRequest->body.queryJob);
if (code == TSDB_CODE_SUCCESS) {
return;
......@@ -934,7 +934,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
conn.mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp);
code = catalogAsyncGetAllMeta(pCtg, &conn, pRequest->requestId, &catalogReq, syncCatalogFn, &param, NULL);
code = catalogAsyncGetAllMeta(pCtg, &conn, &catalogReq, syncCatalogFn, &param, NULL);
if (code) {
goto _return;
}
......
......@@ -82,7 +82,8 @@ int32_t processConnectRsp(void* param, const SDataBuf* pMsg, int32_t code) {
pTscObj->connId = connectRsp.connId;
pTscObj->acctId = connectRsp.acctId;
tstrncpy(pTscObj->ver, connectRsp.sVersion, tListLen(pTscObj->ver));
tstrncpy(pTscObj->sVer, connectRsp.sVer, tListLen(pTscObj->sVer));
tstrncpy(pTscObj->sDetailVer, connectRsp.sDetailVer, tListLen(pTscObj->sDetailVer));
// update the appInstInfo
pTscObj->pAppInfo->clusterId = connectRsp.clusterId;
......@@ -287,6 +288,103 @@ int32_t processAlterStbRsp(void* param, const SDataBuf* pMsg, int32_t code) {
return code;
}
static int32_t buildShowVariablesBlock(SArray* pVars, SSDataBlock** block) {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
pBlock->info.hasVarCol = true;
pBlock->pDataBlock = taosArrayInit(SHOW_VARIABLES_RESULT_COLS, sizeof(SColumnInfoData));
SColumnInfoData infoData = {0};
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD1_LEN;
taosArrayPush(pBlock->pDataBlock, &infoData);
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = SHOW_VARIABLES_RESULT_FIELD2_LEN;
taosArrayPush(pBlock->pDataBlock, &infoData);
int32_t numOfCfg = taosArrayGetSize(pVars);
blockDataEnsureCapacity(pBlock, numOfCfg);
for (int32_t i = 0, c = 0; i < numOfCfg; ++i, c = 0) {
SVariablesInfo *pInfo = taosArrayGet(pVars, i);
char name[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(name, pInfo->name, TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, name, false);
char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(value, pInfo->value, TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE);
pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, value, false);
}
pBlock->info.rows = numOfCfg;
*block = pBlock;
return TSDB_CODE_SUCCESS;
}
static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) {
SSDataBlock* pBlock = NULL;
int32_t code = buildShowVariablesBlock(pVars, &pBlock);
if (code) {
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pRsp)->useconds = 0;
(*pRsp)->completed = 1;
(*pRsp)->precision = 0;
(*pRsp)->compressed = 0;
(*pRsp)->compLen = 0;
(*pRsp)->numOfRows = htonl(pBlock->info.rows);
(*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS);
int32_t len = 0;
blockCompressEncode(pBlock, (*pRsp)->data, &len, SHOW_VARIABLES_RESULT_COLS, false);
ASSERT(len == rspSize - sizeof(SRetrieveTableRsp));
blockDataDestroy(pBlock);
return TSDB_CODE_SUCCESS;
}
int32_t processShowVariablesRsp(void* param, const SDataBuf* pMsg, int32_t code) {
SRequestObj* pRequest = param;
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
} else {
SShowVariablesRsp rsp = {0};
SRetrieveTableRsp* pRes = NULL;
code = tDeserializeSShowVariablesRsp(pMsg->pData, pMsg->len, &rsp);
if (TSDB_CODE_SUCCESS == code) {
code = buildShowVariablesRsp(rsp.variables, &pRes);
}
if (TSDB_CODE_SUCCESS == code) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRes, false, false);
}
tFreeSShowVariablesRsp(&rsp);
}
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
}
return code;
}
__async_send_cb_fn_t getMsgRspHandle(int32_t msgType) {
switch (msgType) {
case TDMT_MND_CONNECT:
......@@ -301,6 +399,8 @@ __async_send_cb_fn_t getMsgRspHandle(int32_t msgType) {
return processDropDbRsp;
case TDMT_MND_ALTER_STB:
return processAlterStbRsp;
case TDMT_MND_SHOW_VARIABLES:
return processShowVariablesRsp;
default:
return genericRspCallback;
}
......
......@@ -2250,6 +2250,56 @@ int32_t tDeserializeSDnodeListReq(void *buf, int32_t bufLen, SDnodeListReq *pReq
return 0;
}
int32_t tSerializeSServerVerReq(void *buf, int32_t bufLen, SServerVerReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->useless) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSServerVerReq(void *buf, int32_t bufLen, SServerVerReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->useless) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSServerVerRsp(void *buf, int32_t bufLen, SServerVerRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->ver) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSServerVerRsp(void *buf, int32_t bufLen, SServerVerRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pRsp->ver) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp) {
SEncoder encoder = {0};
......@@ -2859,6 +2909,67 @@ int32_t tDeserializeSShowVariablesReq(void *buf, int32_t bufLen, SShowVariablesR
return 0;
}
int32_t tEncodeSVariablesInfo(SEncoder* pEncoder, SVariablesInfo* pInfo) {
if (tEncodeCStr(pEncoder, pInfo->name) < 0) return -1;
if (tEncodeCStr(pEncoder, pInfo->value) < 0) return -1;
return 0;
}
int32_t tDecodeSVariablesInfo(SDecoder* pDecoder, SVariablesInfo* pInfo) {
if (tDecodeCStrTo(pDecoder, pInfo->name) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pInfo->value) < 0) return -1;
return 0;
}
int32_t tSerializeSShowVariablesRsp(void* buf, int32_t bufLen, SShowVariablesRsp* pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
int32_t varNum = taosArrayGetSize(pRsp->variables);
if (tEncodeI32(&encoder, varNum) < 0) return -1;
for (int32_t i = 0; i < varNum; ++i) {
SVariablesInfo* pInfo = taosArrayGet(pRsp->variables, i);
if (tEncodeSVariablesInfo(&encoder, pInfo) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSShowVariablesRsp(void* buf, int32_t bufLen, SShowVariablesRsp* pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
int32_t varNum = 0;
if (tDecodeI32(&decoder, &varNum) < 0) return -1;
if (varNum > 0) {
pRsp->variables = taosArrayInit(varNum, sizeof(SVariablesInfo));
if (NULL == pRsp->variables) return -1;
for (int32_t i = 0; i < varNum; ++i) {
SVariablesInfo info = {0};
if (tDecodeSVariablesInfo(&decoder, &info) < 0) return -1;
if (NULL == taosArrayPush(pRsp->variables, &info)) return -1;
}
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSShowVariablesRsp(SShowVariablesRsp* pRsp) {
if (NULL == pRsp) {
return;
}
taosArrayDestroy(pRsp->variables);
}
int32_t tSerializeSShowReq(void *buf, int32_t bufLen, SShowReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
......@@ -3400,7 +3511,8 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tEncodeI8(&encoder, pRsp->superUser) < 0) return -1;
if (tEncodeI8(&encoder, pRsp->connType) < 0) return -1;
if (tEncodeSEpSet(&encoder, &pRsp->epSet) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->sVersion) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->sVer) < 0) return -1;
if (tEncodeCStr(&encoder, pRsp->sDetailVer) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
......@@ -3420,7 +3532,8 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tDecodeI8(&decoder, &pRsp->superUser) < 0) return -1;
if (tDecodeI8(&decoder, &pRsp->connType) < 0) return -1;
if (tDecodeSEpSet(&decoder, &pRsp->epSet) < 0) return -1;
if (tDecodeCStrTo(&decoder, pRsp->sVersion) < 0) return -1;
if (tDecodeCStrTo(&decoder, pRsp->sVer) < 0) return -1;
if (tDecodeCStrTo(&decoder, pRsp->sDetailVer) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
......
......@@ -206,6 +206,8 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_SYSTABLE_RETRIEVE, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_AUTH, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_SHOW_VARIABLES, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_SERVER_VERSION, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_CONTINUE, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
......
......@@ -48,6 +48,7 @@ static int32_t mndDnodeActionInsert(SSdb *pSdb, SDnodeObj *pDnode);
static int32_t mndDnodeActionDelete(SSdb *pSdb, SDnodeObj *pDnode);
static int32_t mndDnodeActionUpdate(SSdb *pSdb, SDnodeObj *pOld, SDnodeObj *pNew);
static int32_t mndProcessDnodeListReq(SRpcMsg *pReq);
static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq);
static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq);
......@@ -78,6 +79,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_DND_CONFIG_DNODE_RSP, mndProcessConfigDnodeRsp);
mndSetMsgHandle(pMnode, TDMT_MND_STATUS, mndProcessStatusReq);
mndSetMsgHandle(pMnode, TDMT_MND_DNODE_LIST, mndProcessDnodeListReq);
mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndRetrieveConfigs);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndCancelGetNextConfig);
......@@ -554,6 +556,60 @@ _OVER:
return code;
}
static int32_t mndProcessShowVariablesReq(SRpcMsg *pReq) {
SShowVariablesRsp rsp = {0};
int32_t code = -1;
rsp.variables = taosArrayInit(4, sizeof(SVariablesInfo));
if (NULL == rsp.variables) {
mError("failed to alloc SVariablesInfo array while process show variables req");
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
SVariablesInfo info = {0};
strcpy(info.name, "statusInterval");
snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval);
taosArrayPush(rsp.variables, &info);
strcpy(info.name, "timezone");
snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsTimezoneStr);
taosArrayPush(rsp.variables, &info);
strcpy(info.name, "locale");
snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsLocale);
taosArrayPush(rsp.variables, &info);
strcpy(info.name, "charset");
snprintf(info.value, TSDB_CONFIG_VALUE_LEN, "%s", tsCharset);
taosArrayPush(rsp.variables, &info);
int32_t rspLen = tSerializeSShowVariablesRsp(NULL, 0, &rsp);
void *pRsp = rpcMallocCont(rspLen);
if (pRsp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
tSerializeSShowVariablesRsp(pRsp, rspLen, &rsp);
pReq->info.rspLen = rspLen;
pReq->info.rsp = pRsp;
code = 0;
_OVER:
if (code != 0) {
mError("failed to get show variables info since %s", terrstr());
}
tFreeSShowVariablesRsp(&rsp);
return code;
}
static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
......
......@@ -70,6 +70,7 @@ static void mndCancelGetNextQuery(SMnode *pMnode, void *pIter);
static void mndFreeApp(SAppObj *pApp);
static int32_t mndRetrieveApps(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void mndCancelGetNextApp(SMnode *pMnode, void *pIter);
static int32_t mndProcessSvrVerReq(SRpcMsg *pReq);
int32_t mndInitProfile(SMnode *pMnode) {
SProfileMgmt *pMgmt = &pMnode->profileMgmt;
......@@ -94,6 +95,7 @@ int32_t mndInitProfile(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_CONNECT, mndProcessConnectReq);
mndSetMsgHandle(pMnode, TDMT_MND_KILL_QUERY, mndProcessKillQueryReq);
mndSetMsgHandle(pMnode, TDMT_MND_KILL_CONN, mndProcessKillConnReq);
mndSetMsgHandle(pMnode, TDMT_MND_SERVER_VERSION, mndProcessSvrVerReq);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CONNS, mndRetrieveConns);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONNS, mndCancelGetNextConn);
......@@ -262,8 +264,9 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
connectRsp.connId = pConn->id;
connectRsp.connType = connReq.connType;
connectRsp.dnodeNum = mndGetDnodeSize(pMnode);
snprintf(connectRsp.sVersion, sizeof(connectRsp.sVersion), "ver:%s\nbuild:%s\ngitinfo:%s", version, buildinfo,
strcpy(connectRsp.sVer, version);
snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version, buildinfo,
gitinfo);
mndGetMnodeEpSet(pMnode, &connectRsp.epSet);
......@@ -460,6 +463,27 @@ static int32_t mndUpdateAppInfo(SMnode *pMnode, SClientHbReq *pHbReq, SRpcConnIn
return TSDB_CODE_SUCCESS;
}
static int32_t mndGetOnlineDnodeNum(SMnode *pMnode, int32_t *num) {
SSdb *pSdb = pMnode->pSdb;
SDnodeObj *pDnode = NULL;
int64_t curMs = taosGetTimestampMs();
void *pIter = NULL;
while (true) {
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode);
if (pIter == NULL) break;
bool online = mndIsDnodeOnline(pDnode, curMs);
if (online) {
(*num)++;
}
sdbRelease(pSdb, pDnode);
}
return TSDB_CODE_SUCCESS;
}
static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHbReq *pHbReq,
SClientHbBatchRsp *pBatchRsp) {
SProfileMgmt *pMgmt = &pMnode->profileMgmt;
......@@ -503,7 +527,7 @@ static int32_t mndProcessQueryHeartBeat(SMnode *pMnode, SRpcMsg *pMsg, SClientHb
rspBasic->connId = pConn->id;
rspBasic->totalDnodes = mndGetDnodeSize(pMnode);
rspBasic->onlineDnodes = 1; // TODO
mndGetOnlineDnodeNum(pMnode, &rspBasic->onlineDnodes);
mndGetMnodeEpSet(pMnode, &rspBasic->epSet);
mndCreateQnodeList(pMnode, &rspBasic->pQnodeList, -1);
......@@ -694,6 +718,28 @@ static int32_t mndProcessKillConnReq(SRpcMsg *pReq) {
}
}
static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) {
int32_t code = -1;
SServerVerRsp rsp = {0};
strcpy(rsp.ver, version);
int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp);
if (contLen < 0) goto _over;
void *pRsp = rpcMallocCont(contLen);
if (pRsp == NULL) goto _over;
tSerializeSServerVerRsp(pRsp, contLen, &rsp);
pReq->info.rspLen = contLen;
pReq->info.rsp = pRsp;
code = 0;
_over:
return code;
}
static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
......
......@@ -321,6 +321,10 @@ FAIL:
}
int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) {
ASSERT(pTask->isDataScan == 0 || pTask->isDataScan == 1);
if (pTask->isDataScan == 0 && pTask->sinkType == TASK_SINK__NONE) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
SEncoder encoder;
tEncoderInit(&encoder, NULL, 0);
tEncodeSStreamTask(&encoder, pTask);
......
......@@ -198,6 +198,10 @@ int32_t mndInitSync(SMnode *pMnode) {
return -1;
}
// decrease election timer
setElectTimerMS(pMgmt->sync, 600);
setHeartbeatTimerMS(pMgmt->sync, 300);
mDebug("mnode-sync is opened, id:%" PRId64, pMgmt->sync);
return 0;
}
......@@ -261,7 +265,8 @@ bool mndIsMaster(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
if (!syncIsReady(pMgmt->sync)) {
terrno = TSDB_CODE_SYN_NOT_LEADER;
// get terrno from syncIsReady
// terrno = TSDB_CODE_SYN_NOT_LEADER;
return false;
}
......
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
if(NOT TD_WINDOWS)
add_test(
NAME dbTest
COMMAND dbTest
)
endif(NOT TD_WINDOWS)
add_test(
NAME dbTest
COMMAND dbTest
)
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
# if(NOT TD_WINDOWS)
add_test(
NAME funcTest
COMMAND funcTest
)
# endif(NOT TD_WINDOWS)
add_test(
NAME funcTest
COMMAND funcTest
)
\ No newline at end of file
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
# if(NOT TD_WINDOWS)
add_test(
NAME profileTest
COMMAND profileTest
)
# endif(NOT TD_WINDOWS)
add_test(
NAME profileTest
COMMAND profileTest
)
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
# if(NOT TD_WINDOWS)
add_test(
NAME showTest
COMMAND showTest
)
# endif(NOT TD_WINDOWS)
add_test(
NAME showTest
COMMAND showTest
)
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
if(NOT TD_WINDOWS)
add_test(
NAME smaTest
COMMAND smaTest
)
endif(NOT TD_WINDOWS)
add_test(
NAME smaTest
COMMAND smaTest
)
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
if(NOT TD_WINDOWS)
add_test(
NAME stbTest
COMMAND stbTest
)
endif(NOT TD_WINDOWS)
\ No newline at end of file
add_test(
NAME stbTest
COMMAND stbTest
)
\ No newline at end of file
......@@ -5,9 +5,7 @@ target_link_libraries(
PUBLIC sut
)
if(NOT TD_WINDOWS)
add_test(
NAME userTest
COMMAND userTest
)
endif(NOT TD_WINDOWS)
add_test(
NAME userTest
COMMAND userTest
)
......@@ -455,6 +455,9 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
}
tDecoderClear(&decoder);
ASSERT(pTask->isDataScan == 0 || pTask->isDataScan == 1);
if (pTask->isDataScan == 0 && pTask->sinkType == TASK_SINK__NONE) {
ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
}
pTask->execStatus = TASK_EXEC_STATUS__IDLE;
......
......@@ -144,11 +144,15 @@ void vnodeProposeMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs) {
vnodeAccumBlockMsg(pVnode, pMsg->msgType);
} else if (code == -1 && terrno == TSDB_CODE_SYN_NOT_LEADER) {
SEpSet newEpSet = {0};
syncGetRetryEpSet(pVnode->sync, &newEpSet);
/*
syncGetEpSet(pVnode->sync, &newEpSet);
SEp *pEp = &newEpSet.eps[newEpSet.inUse];
if (pEp->port == tsServerPort && strcmp(pEp->fqdn, tsLocalFqdn) == 0) {
newEpSet.inUse = (newEpSet.inUse + 1) % newEpSet.numOfEps;
}
*/
vGTrace("vgId:%d, msg:%p is redirect since not leader, numOfEps:%d inUse:%d", vgId, pMsg, newEpSet.numOfEps,
newEpSet.inUse);
......
......@@ -76,6 +76,7 @@ typedef enum {
CTG_TASK_GET_INDEX,
CTG_TASK_GET_UDF,
CTG_TASK_GET_USER,
CTG_TASK_GET_SVR_VER,
} CTG_TASK_TYPE;
typedef enum {
......@@ -224,6 +225,7 @@ typedef struct SCtgJob {
int32_t dbInfoNum;
int32_t tbIndexNum;
int32_t tbCfgNum;
int32_t svrVerNum;
} SCtgJob;
typedef struct SCtgMsgCtx {
......@@ -578,8 +580,9 @@ int32_t ctgGetTbMetaFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SNa
int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableMetaOutput* out, SCtgTask* pTask);
int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, SVgroupInfo *vgroupInfo, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const SName* pTableName, STableCfg **out, SCtgTask* pTask);
int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **out, SCtgTask* pTask);
int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int32_t* taskNum);
int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const SCatalogReq* pReq, catalogCallback fp, void* param, int32_t* taskNum);
int32_t ctgLaunchJob(SCtgJob *pJob);
int32_t ctgMakeAsyncRes(SCtgJob *pJob);
int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp, void* param);
......
......@@ -1051,7 +1051,7 @@ _return:
CTG_API_LEAVE(code);
}
int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId) {
int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, const SCatalogReq* pReq, catalogCallback fp, void* param, int64_t* jobId) {
CTG_API_ENTER();
if (NULL == pCtg || NULL == pConn || NULL == pReq || NULL == fp || NULL == param) {
......@@ -1060,7 +1060,7 @@ int32_t catalogAsyncGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, uint64_t
int32_t code = 0, taskNum = 0;
SCtgJob *pJob = NULL;
CTG_ERR_JRET(ctgInitJob(pCtg, pConn, &pJob, reqId, pReq, fp, param, &taskNum));
CTG_ERR_JRET(ctgInitJob(pCtg, pConn, &pJob, pReq, fp, param, &taskNum));
if (taskNum <= 0) {
SMetaData* pMetaData = taosMemoryCalloc(1, sizeof(SMetaData));
fp(pMetaData, param, TSDB_CODE_SUCCESS);
......@@ -1247,6 +1247,22 @@ _return:
CTG_API_LEAVE(code);
}
int32_t catalogGetServerVersion(SCatalog* pCtg, SRequestConnInfo *pConn, char** pVersion) {
CTG_API_ENTER();
if (NULL == pCtg || NULL == pConn || NULL == pVersion) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
int32_t code = 0;
CTG_ERR_JRET(ctgGetSvrVerFromMnode(pCtg, pConn, pVersion, NULL));
_return:
CTG_API_LEAVE(code);
}
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth) {
CTG_API_ENTER();
......
......@@ -255,6 +255,20 @@ int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetSvrVerTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
SCtgTask task = {0};
task.type = CTG_TASK_GET_SVR_VER;
task.taskId = taskIdx;
task.pJob = pJob;
taosArrayPush(pJob->pTasks, &task);
qDebug("QID:0x%" PRIx64 " [%dth] task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetTbIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
SName *name = (SName*)param;
SCtgTask task = {0};
......@@ -413,7 +427,7 @@ int32_t ctgInitTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t *tas
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint64_t reqId, const SCatalogReq* pReq, catalogCallback fp, void* param, int32_t* taskNum) {
int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const SCatalogReq* pReq, catalogCallback fp, void* param, int32_t* taskNum) {
int32_t code = 0;
int32_t tbMetaNum = (int32_t)taosArrayGetSize(pReq->pTableMeta);
int32_t dbVgNum = (int32_t)taosArrayGetSize(pReq->pDbVgroup);
......@@ -421,6 +435,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
int32_t udfNum = (int32_t)taosArrayGetSize(pReq->pUdf);
int32_t qnodeNum = pReq->qNodeRequired ? 1 : 0;
int32_t dnodeNum = pReq->dNodeRequired ? 1 : 0;
int32_t svrVerNum = pReq->svrVerRequired ? 1 : 0;
int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg);
int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex);
int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser);
......@@ -428,21 +443,21 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
int32_t tbIndexNum = (int32_t)taosArrayGetSize(pReq->pTableIndex);
int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg);
*taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum;
*taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum;
if (*taskNum <= 0) {
ctgDebug("Empty input for job, no need to retrieve meta, reqId:0x%" PRIx64, reqId);
ctgDebug("Empty input for job, no need to retrieve meta, reqId:0x%" PRIx64, pConn->requestId);
return TSDB_CODE_SUCCESS;
}
*job = taosMemoryCalloc(1, sizeof(SCtgJob));
if (NULL == *job) {
ctgError("failed to calloc, size:%d, reqId:0x%" PRIx64, (int32_t)sizeof(SCtgJob), reqId);
ctgError("failed to calloc, size:%d, reqId:0x%" PRIx64, (int32_t)sizeof(SCtgJob), pConn->requestId);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
SCtgJob *pJob = *job;
pJob->queryId = reqId;
pJob->queryId = pConn->requestId;
pJob->userFp = fp;
pJob->pCtg = pCtg;
pJob->conn = *pConn;
......@@ -460,6 +475,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
pJob->dbInfoNum = dbInfoNum;
pJob->tbIndexNum = tbIndexNum;
pJob->tbCfgNum = tbCfgNum;
pJob->svrVerNum = svrVerNum;
pJob->pTasks = taosArrayInit(*taskNum, sizeof(SCtgTask));
......@@ -530,6 +546,10 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_DNODE, NULL, NULL));
}
if (svrVerNum) {
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_SVR_VER, NULL, NULL));
}
pJob->refId = taosAddRef(gCtgMgmt.jobPool, pJob);
if (pJob->refId < 0) {
ctgError("add job to ref failed, error: %s", tstrerror(terrno));
......@@ -728,6 +748,21 @@ int32_t ctgDumpUserRes(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
int32_t ctgDumpSvrVer(SCtgTask* pTask) {
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pSvrVer) {
pJob->jobRes.pSvrVer = taosMemoryCalloc(1, sizeof(SMetaRes));
if (NULL == pJob->jobRes.pSvrVer) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
}
pJob->jobRes.pSvrVer->code = pTask->code;
pJob->jobRes.pSvrVer->pRes = pTask->res;
return TSDB_CODE_SUCCESS;
}
int32_t ctgInvokeSubCb(SCtgTask *pTask) {
int32_t code = 0;
......@@ -1156,6 +1191,20 @@ _return:
CTG_RET(code);
}
int32_t ctgHandleGetSvrVerRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
_return:
ctgHandleTaskEnd(pTask, code);
CTG_RET(code);
}
int32_t ctgAsyncRefreshTbMeta(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
......@@ -1459,6 +1508,15 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
CTG_ERR_RET(ctgGetSvrVerFromMnode(pCtg, pConn, NULL, pTask));
return TSDB_CODE_SUCCESS;
}
int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) {
ctgResetTbMetaTask(pTask);
......@@ -1532,6 +1590,7 @@ SCtgAsyncFps gCtgAsyncFps[] = {
{ctgInitGetIndexTask, ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes, NULL, NULL},
{ctgInitGetUdfTask, ctgLaunchGetUdfTask, ctgHandleGetUdfRsp, ctgDumpUdfRes, NULL, NULL},
{ctgInitGetUserTask, ctgLaunchGetUserTask, ctgHandleGetUserRsp, ctgDumpUserRes, NULL, NULL},
{ctgInitGetSvrVerTask, ctgLaunchGetSvrVerTask, ctgHandleGetSvrVerRsp, ctgDumpSvrVer, NULL, NULL},
};
int32_t ctgMakeAsyncRes(SCtgJob *pJob) {
......@@ -1633,7 +1692,7 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
qDebug("QID:0x%" PRIx64 " ctg start to launch task %d", pJob->queryId, pTask->taskId);
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
pTask->status = CTG_TASK_LAUNCHED;
}
......
......@@ -210,7 +210,7 @@ int32_t ctgdLaunchAsyncCall(SCatalog* pCtg, SRequestConnInfo* pConn, uint64_t re
int64_t jobId = 0;
CTG_ERR_JRET(catalogAsyncGetAllMeta(pCtg, pConn, reqId, &req, ctgdUserCallback, param, &jobId));
CTG_ERR_JRET(catalogAsyncGetAllMeta(pCtg, pConn, &req, ctgdUserCallback, param, &jobId));
_return:
......
......@@ -217,6 +217,21 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
qDebug("Got stb cfg from mnode, tbFName:%s", target);
break;
}
case TDMT_MND_SERVER_VERSION: {
if (TSDB_CODE_SUCCESS != rspCode) {
qError("error rsp for svr ver from mnode, error:%s", tstrerror(rspCode));
CTG_ERR_RET(rspCode);
}
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
if (code) {
qError("Process svr ver rsp failed, error:%s", tstrerror(code));
CTG_ERR_RET(code);
}
qDebug("Got svr ver from mnode");
break;
}
default:
qError("invalid req type %s", TMSG_INFO(reqType));
return TSDB_CODE_APP_ERROR;
......@@ -811,4 +826,38 @@ int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const S
return TSDB_CODE_SUCCESS;
}
int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, char **out, SCtgTask* pTask) {
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_SERVER_VERSION;
void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
qDebug("try to get svr ver from mnode");
int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](NULL, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build get svr ver msg failed, code:%s", tstrerror(code));
CTG_ERR_RET(code);
}
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
}
SRpcMsg rpcMsg = {
.msgType = reqType,
.pCont = msg,
.contLen = msgLen,
};
SRpcMsg rpcRsp = {0};
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL));
return TSDB_CODE_SUCCESS;
}
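
One detail worth noting in the helper above is the allocator choice: when a catalog task drives the call (the async path) the message buffer comes from the ordinary heap allocator, while the synchronous path allocates an RPC continuation buffer owned by the transport layer. Below is a minimal sketch of that function-pointer selection pattern, with placeholder allocators rather than the real taosMemoryMalloc/rpcMallocCont:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *heapAlloc(int32_t n) { return malloc((size_t)n); }  /* stand-in for the async-task allocator */
static void *rpcAlloc(int32_t n)  { return malloc((size_t)n); }  /* stand-in for the RPC continuation allocator */

/* Pick the allocator through a function pointer, as the catalog helper does. */
static int32_t buildMsg(int isAsync, char **msg, int32_t *msgLen) {
  void *(*mallocFp)(int32_t) = isAsync ? heapAlloc : rpcAlloc;
  *msgLen = 16;
  *msg = mallocFp(*msgLen);
  return (*msg != NULL) ? 0 : -1;
}

int main(void) {
  char   *msg = NULL;
  int32_t len = 0;
  if (buildMsg(1, &msg, &len) == 0) printf("built %d-byte request\n", len);
  free(msg);
  return 0;
}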
......@@ -45,6 +45,8 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
return "[get udf]";
case CTG_TASK_GET_USER:
return "[get user]";
case CTG_TASK_GET_SVR_VER:
return "[get svr ver]";
default:
return "unknown";
}
......@@ -103,8 +105,13 @@ void ctgFreeSMetaData(SMetaData* pData) {
taosArrayDestroy(pData->pQnodeList);
pData->pQnodeList = NULL;
taosArrayDestroy(pData->pDnodeList);
pData->pDnodeList = NULL;
taosArrayDestroy(pData->pTableCfg);
pData->pTableCfg = NULL;
taosMemoryFreeClear(pData->pSvrVer);
}
void ctgFreeSCtgUserAuth(SCtgUserAuth *userCache) {
......@@ -346,20 +353,8 @@ void ctgResetTbMetaTask(SCtgTask* pTask) {
void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
switch (type) {
case CTG_TASK_GET_QNODE: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
break;
}
case CTG_TASK_GET_DNODE: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
break;
}
case CTG_TASK_GET_TB_META: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_QNODE:
case CTG_TASK_GET_DNODE:
case CTG_TASK_GET_DB_VGROUP: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
......@@ -373,14 +368,6 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
}
break;
}
case CTG_TASK_GET_DB_INFO: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_TB_HASH: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_TB_INDEX: {
taosArrayDestroyEx(*pRes, tFreeSTableIndexInfo);
*pRes = NULL;
......@@ -394,15 +381,13 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
}
break;
}
case CTG_TASK_GET_INDEX: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_UDF: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_USER: {
case CTG_TASK_GET_TB_HASH:
case CTG_TASK_GET_DB_INFO:
case CTG_TASK_GET_INDEX:
case CTG_TASK_GET_UDF:
case CTG_TASK_GET_USER:
case CTG_TASK_GET_SVR_VER:
case CTG_TASK_GET_TB_META: {
taosMemoryFreeClear(*pRes);
break;
}
......@@ -415,20 +400,12 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
void ctgFreeSubTaskRes(CTG_TASK_TYPE type, void **pRes) {
switch (type) {
case CTG_TASK_GET_QNODE: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
break;
}
case CTG_TASK_GET_QNODE:
case CTG_TASK_GET_DNODE: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
break;
}
case CTG_TASK_GET_TB_META: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_DB_VGROUP: {
if (*pRes) {
SDBVgInfo* pInfo = (SDBVgInfo*)*pRes;
......@@ -445,14 +422,6 @@ void ctgFreeSubTaskRes(CTG_TASK_TYPE type, void **pRes) {
}
break;
}
case CTG_TASK_GET_DB_INFO: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_TB_HASH: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_TB_INDEX: {
taosArrayDestroyEx(*pRes, tFreeSTableIndexInfo);
*pRes = NULL;
......@@ -466,14 +435,12 @@ void ctgFreeSubTaskRes(CTG_TASK_TYPE type, void **pRes) {
}
break;
}
case CTG_TASK_GET_INDEX: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_UDF: {
taosMemoryFreeClear(*pRes);
break;
}
case CTG_TASK_GET_TB_META:
case CTG_TASK_GET_DB_INFO:
case CTG_TASK_GET_TB_HASH:
case CTG_TASK_GET_INDEX:
case CTG_TASK_GET_UDF:
case CTG_TASK_GET_SVR_VER:
case CTG_TASK_GET_USER: {
taosMemoryFreeClear(*pRes);
break;
......@@ -497,10 +464,6 @@ void ctgClearSubTaskRes(SCtgSubRes *pRes) {
void ctgFreeTaskCtx(SCtgTask* pTask) {
switch (pTask->type) {
case CTG_TASK_GET_QNODE: {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_TB_META: {
SCtgTbMetaCtx* taskCtx = (SCtgTbMetaCtx*)pTask->taskCtx;
taosMemoryFreeClear(taskCtx->pName);
......@@ -511,18 +474,6 @@ void ctgFreeTaskCtx(SCtgTask* pTask) {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_DB_VGROUP: {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_DB_CFG: {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_DB_INFO: {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_TB_HASH: {
SCtgTbHashCtx* taskCtx = (SCtgTbHashCtx*)pTask->taskCtx;
taosMemoryFreeClear(taskCtx->pName);
......@@ -542,14 +493,12 @@ void ctgFreeTaskCtx(SCtgTask* pTask) {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_INDEX: {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_UDF: {
taosMemoryFreeClear(pTask->taskCtx);
break;
}
case CTG_TASK_GET_DB_VGROUP:
case CTG_TASK_GET_DB_CFG:
case CTG_TASK_GET_DB_INFO:
case CTG_TASK_GET_INDEX:
case CTG_TASK_GET_UDF:
case CTG_TASK_GET_QNODE:
case CTG_TASK_GET_USER: {
taosMemoryFreeClear(pTask->taskCtx);
break;
......
......@@ -145,10 +145,10 @@ static SArray* filterQualifiedChildTables(const SStreamBlockScanInfo* pScanInfo,
continue;
}
ASSERT(mr.me.type == TSDB_CHILD_TABLE);
if (mr.me.ctbEntry.suid != pScanInfo->tableUid) {
if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pScanInfo->tableUid) {
continue;
}
// TODO handle ntb case
taosArrayPush(qa, id);
}
......
......@@ -4365,6 +4365,26 @@ _error:
return NULL;
}
static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
qError("failed to find stream scan operator");
return TSDB_CODE_QRY_APP_ERROR;
}
if (pOperator->numOfDownstream > 1) {
qError("join not supported for stream block scan");
return TSDB_CODE_QRY_APP_ERROR;
}
return extractTbscanInStreamOpTree(pOperator->pDownstream[0], ppInfo);
} else {
SStreamBlockScanInfo* pInfo = pOperator->info;
ASSERT(pInfo->pSnapshotReadOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN);
*ppInfo = pInfo->pSnapshotReadOp->info;
return 0;
}
}
int32_t extractTableScanNode(SPhysiNode* pNode, STableScanPhysiNode** ppNode) {
if (pNode->pChildren == NULL || LIST_LENGTH(pNode->pChildren) == 0) {
if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == pNode->type) {
......@@ -4387,37 +4407,27 @@ int32_t extractTableScanNode(SPhysiNode* pNode, STableScanPhysiNode** ppNode) {
return -1;
}
int32_t doRebuildReader(SOperatorInfo* pOperator, SSubplan* plan, SReadHandle* pHandle) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
qError("failed to find stream scan operator");
return TSDB_CODE_QRY_APP_ERROR;
}
int32_t rebuildReader(SOperatorInfo* pOperator, SSubplan* plan, SReadHandle* pHandle, int64_t uid, int64_t ts) {
STableScanInfo* pTableScanInfo = NULL;
if (extractTbscanInStreamOpTree(pOperator, &pTableScanInfo) < 0) {
return -1;
}
if (pOperator->numOfDownstream > 1) {
qError("join not supported for stream block scan");
return TSDB_CODE_QRY_APP_ERROR;
}
return doRebuildReader(pOperator->pDownstream[0], plan, pHandle);
} else {
SStreamBlockScanInfo* pInfo = pOperator->info;
ASSERT(pInfo->pSnapshotReadOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN);
STableScanInfo* pTableScanInfo = pInfo->pSnapshotReadOp->info;
STableScanPhysiNode* pNode = NULL;
if (extractTableScanNode(plan->pNode, &pNode) < 0) {
ASSERT(0);
}
tsdbCleanupReadHandle(pTableScanInfo->dataReader);
STableScanPhysiNode* pNode = NULL;
if (extractTableScanNode(plan->pNode, &pNode) < 0) {
ASSERT(0);
}
tsdbCleanupReadHandle(pTableScanInfo->dataReader);
STableListInfo info = {0};
pTableScanInfo->dataReader = doCreateDataReader(pNode, pHandle, &info, 0, 0);
if (pTableScanInfo->dataReader == NULL) {
ASSERT(0);
qError("failed to create data reader");
return TSDB_CODE_QRY_APP_ERROR;
}
STableListInfo info = {0};
pTableScanInfo->dataReader = doCreateDataReader(pNode, pHandle, &info, 0, 0);
if (pTableScanInfo->dataReader == NULL) {
ASSERT(0);
qError("failed to create data reader");
return TSDB_CODE_QRY_APP_ERROR;
}
// TODO: set uid and ts to data reader
return 0;
}
......
......@@ -3902,6 +3902,7 @@ typedef struct SMergeIntervalAggOperatorInfo {
SIntervalAggOperatorInfo intervalAggOperatorInfo;
SHashObj* groupIntervalHash;
void* groupIntervalIter;
bool hasGroupId;
uint64_t groupId;
SSDataBlock* prefetchedBlock;
......@@ -3914,6 +3915,23 @@ void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) {
destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput);
}
static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, STimeWindow* win, SSDataBlock* pResultBlock) {
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
bool ascScan = (iaInfo->order == TSDB_ORDER_ASC);
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId);
SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
return TSDB_CODE_SUCCESS;
}
static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId, SSDataBlock* pResultBlock,
STimeWindow* newWin) {
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
......@@ -3928,20 +3946,9 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
return 0;
}
if (newWin == NULL || (ascScan && newWin->skey > prevWin->ekey || (!ascScan) && newWin->skey < prevWin->ekey)) {
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &prevWin->skey, TSDB_KEYSIZE, tableGroupId);
SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
if (newWin == NULL) {
taosHashRemove(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId));
} else {
taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow));
}
if ((ascScan && newWin->skey > prevWin->skey || (!ascScan) && newWin->skey < prevWin->skey)) {
finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
taosHashPut(miaInfo->groupIntervalHash, &tableGroupId, sizeof(tableGroupId), newWin, sizeof(STimeWindow));
}
return 0;
......@@ -4090,12 +4097,17 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
}
pRes->info.groupId = miaInfo->groupId;
} else {
void* p = taosHashIterate(miaInfo->groupIntervalHash, NULL);
if (p != NULL) {
}
if (miaInfo->inputBlocksFinished) {
void* win = taosHashIterate(miaInfo->groupIntervalHash, miaInfo->groupIntervalIter);
if (win != NULL) {
miaInfo->groupIntervalIter = win;
size_t len = 0;
uint64_t* pKey = taosHashGetKey(p, &len);
outputPrevIntervalResult(pOperator, *pKey, pRes, NULL);
uint64_t* pTableGroupId = taosHashGetKey(win, &len);
finalizeWindowResult(pOperator, *pTableGroupId, win, pRes);
pRes->info.groupId = *pTableGroupId;
}
}
......@@ -4118,6 +4130,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
}
miaInfo->groupIntervalHash = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_NO_LOCK);
miaInfo->groupIntervalIter = NULL;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
......
......@@ -202,6 +202,10 @@ bool blockDistSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t blockDistFunction(SqlFunctionCtx *pCtx);
int32_t blockDistFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
int32_t groupKeyFunction(SqlFunctionCtx* pCtx);
int32_t groupKeyFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
#ifdef __cplusplus
}
#endif
......
......@@ -1519,6 +1519,16 @@ static bool getBlockDistFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv
return true;
}
static int32_t translateGroupKey(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return TSDB_CODE_SUCCESS;
}
SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
pFunc->node.resType = ((SExprNode*)pPara)->resType;
return TSDB_CODE_SUCCESS;
}
// clang-format off
const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
......@@ -2506,12 +2516,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
},
{
.name = "_group_key",
.type = FUNCTION_TYPE_BLOCK_DIST_INFO,
.type = FUNCTION_TYPE_GROUP_KEY,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateGroupKeyFunc,
.translateFunc = translateGroupKey,
.getEnvFunc = getGroupKeyFuncEnv,
.initFunc = functionSetup,
.processFunc = groupKeyFunction,
.finalizeFunc = groupKeyFinalize,
.pPartialFunc = "_group_key",
.pMergeFunc = "_group_key"
}
},
};
// clang-format on
......
......@@ -262,6 +262,12 @@ typedef struct SRateInfo {
int8_t hasResult; // flag to denote has value
} SRateInfo;
typedef struct SGroupKeyInfo{
bool hasResult;
char data[];
} SGroupKeyInfo;
#define SET_VAL(_info, numOfElem, res) \
do { \
if ((numOfElem) <= 0) { \
......@@ -2402,6 +2408,12 @@ bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
return true;
}
bool getGroupKeyFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
SColumnNode* pNode = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
pEnv->calcMemSize = sizeof(SGroupKeyInfo) + pNode->node.resType.bytes;
return true;
}
static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowIndex) {
if (pTsColInfo == NULL) {
return 0;
......@@ -5349,6 +5361,43 @@ int32_t irateFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
return pResInfo->numOfRes;
}
int32_t groupKeyFunction(SqlFunctionCtx* pCtx) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
int32_t bytes = pInputCol->info.bytes;
int32_t startIndex = pInput->startRowIndex;
if (colDataIsNull_s(pInputCol, startIndex)) {
pInfo->hasResult = false;
goto _group_key_over;
}
pInfo->hasResult = true;
char* data = colDataGetData(pInputCol, startIndex);
memcpy(pInfo->data, data, bytes);
_group_key_over:
SET_VAL(pResInfo, 1, 1);
return TSDB_CODE_SUCCESS;
}
int32_t groupKeyFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SGroupKeyInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
colDataAppend(pCol, pBlock->info.rows, pInfo->data, pInfo->hasResult ? false : true);
return pResInfo->numOfRes;
}
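
SGroupKeyInfo ends with a flexible array member, so getGroupKeyFuncEnv sizes the per-group buffer as the struct header plus the key column's byte width, and groupKeyFunction copies the first row's key into it. A small stand-alone sketch of that sizing pattern (illustrative names, assuming a BIGINT-sized key):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for SGroupKeyInfo: a flag plus a flexible array for the key bytes. */
typedef struct {
  bool hasResult;
  char data[];
} KeyBuf;

int main(void) {
  int keyBytes = sizeof(long long);                    /* e.g. a BIGINT group key column */
  KeyBuf *buf = calloc(1, sizeof(KeyBuf) + keyBytes);  /* header + payload, like calcMemSize */
  if (buf == NULL) return 1;
  long long key = 42;
  memcpy(buf->data, &key, sizeof(key));                /* copy the first row's key, as groupKeyFunction does */
  buf->hasResult = true;
  printf("buffer size: %zu, hasResult: %d\n", sizeof(KeyBuf) + keyBytes, buf->hasResult);
  free(buf);
  return 0;
}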
int32_t interpFunction(SqlFunctionCtx* pCtx) {
#if 0
int32_t fillType = (int32_t) pCtx->param[2].i64;
......
......@@ -2294,6 +2294,7 @@ static const char* jkSubplanType = "SubplanType";
static const char* jkSubplanMsgType = "MsgType";
static const char* jkSubplanLevel = "Level";
static const char* jkSubplanDbFName = "DbFName";
static const char* jkSubplanUser = "User";
static const char* jkSubplanNodeAddr = "NodeAddr";
static const char* jkSubplanRootNode = "RootNode";
static const char* jkSubplanDataSink = "DataSink";
......@@ -2316,6 +2317,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkSubplanDbFName, pNode->dbFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddStringToObject(pJson, jkSubplanUser, pNode->user);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSubplanNodeAddr, queryNodeAddrToJson, &pNode->execNode);
}
......@@ -2352,6 +2356,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSubplanDbFName, pNode->dbFName);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSubplanUser, pNode->user);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkSubplanNodeAddr, jsonToQueryNodeAddr, &pNode->execNode);
}
......
......@@ -26,6 +26,7 @@
typedef struct SBlockKeyTuple {
TSKEY skey;
void* payloadAddr;
int16_t index;
} SBlockKeyTuple;
typedef struct SBlockKeyInfo {
......@@ -36,7 +37,6 @@ typedef struct SBlockKeyInfo {
static int32_t rowDataCompar(const void* lhs, const void* rhs) {
TSKEY left = *(TSKEY*)lhs;
TSKEY right = *(TSKEY*)rhs;
if (left == right) {
return 0;
} else {
......@@ -44,6 +44,16 @@ static int32_t rowDataCompar(const void* lhs, const void* rhs) {
}
}
static int32_t rowDataComparStable(const void* lhs, const void* rhs) {
TSKEY left = *(TSKEY*)lhs;
TSKEY right = *(TSKEY*)rhs;
if (left == right) {
return ((SBlockKeyTuple*)lhs)->index - ((SBlockKeyTuple*)rhs)->index;
} else {
return left > right ? 1 : -1;
}
}
void setBoundColumnInfo(SParsedDataColInfo* pColList, SSchema* pSchema, col_id_t numOfCols) {
pColList->numOfCols = numOfCols;
pColList->numOfBound = numOfCols;
......@@ -343,6 +353,7 @@ int sortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKey
while (n < nRows) {
pBlkKeyTuple->skey = TD_ROW_KEY((STSRow*)pBlockData);
pBlkKeyTuple->payloadAddr = pBlockData;
pBlkKeyTuple->index = n;
// next loop
pBlockData += extendedRowSize;
......@@ -354,7 +365,7 @@ int sortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKey
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
// todo. qsort is unstable, if timestamp is same, should get the last one
qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar);
qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataComparStable);
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
int32_t i = 0;
......
......@@ -4662,7 +4662,7 @@ static int32_t extractShowCreateTableResultSchema(int32_t* numOfCols, SSchema**
return TSDB_CODE_SUCCESS;
}
static int32_t extractShowLocalVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) {
static int32_t extractShowVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) {
*numOfCols = 2;
*pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema));
if (NULL == (*pSchema)) {
......@@ -4699,7 +4699,8 @@ int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pS
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
return extractShowCreateTableResultSchema(numOfCols, pSchema);
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
return extractShowLocalVariablesResultSchema(numOfCols, pSchema);
case QUERY_NODE_SHOW_VARIABLES_STMT:
return extractShowVariablesResultSchema(numOfCols, pSchema);
default:
break;
}
......@@ -5930,7 +5931,6 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_SHOW_CLUSTER_STMT:
case QUERY_NODE_SHOW_TOPICS_STMT:
case QUERY_NODE_SHOW_TRANSACTIONS_STMT:
case QUERY_NODE_SHOW_VARIABLES_STMT:
case QUERY_NODE_SHOW_APPS_STMT:
case QUERY_NODE_SHOW_CONSUMERS_STMT:
case QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT:
......@@ -6032,6 +6032,14 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_ALTER_LOCAL_STMT:
pQuery->execMode = QUERY_EXEC_MODE_LOCAL;
break;
case QUERY_NODE_SHOW_VARIABLES_STMT:
pQuery->haveResultSet = true;
pQuery->execMode = QUERY_EXEC_MODE_RPC;
if (NULL != pCxt->pCmdMsg) {
TSWAP(pQuery->pCmdMsg, pCxt->pCmdMsg);
pQuery->msgType = pQuery->pCmdMsg->msgType;
}
break;
default:
pQuery->execMode = QUERY_EXEC_MODE_RPC;
if (NULL != pCxt->pCmdMsg) {
......
......@@ -54,12 +54,6 @@ class ParserDdlTest : public ParserTestBase {
virtual void checkDdl(const SQuery* pQuery, ParserStage stage) {
ASSERT_NE(pQuery, nullptr);
ASSERT_NE(pQuery->pRoot, nullptr);
if (QUERY_EXEC_MODE_RPC == pQuery->execMode) {
ASSERT_EQ(pQuery->haveResultSet, false);
ASSERT_EQ(pQuery->numOfResCols, 0);
ASSERT_EQ(pQuery->pResSchema, nullptr);
ASSERT_EQ(pQuery->precision, 0);
}
if (nullptr != checkDdl_) {
checkDdl_(pQuery, stage);
}
......
......@@ -1465,6 +1465,9 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl
pSubplan->id = pLogicSubplan->id;
pSubplan->subplanType = pLogicSubplan->subplanType;
pSubplan->level = pLogicSubplan->level;
if (NULL != pCxt->pPlanCxt->pUser) {
strcpy(pSubplan->user, pCxt->pPlanCxt->pUser);
}
return pSubplan;
}
......
......@@ -32,9 +32,7 @@ if(${BUILD_WINGETOPT})
target_link_libraries(plannerTest PUBLIC wingetopt)
endif()
# if(NOT TD_WINDOWS)
add_test(
NAME plannerTest
COMMAND plannerTest
)
# endif(NOT TD_WINDOWS)
add_test(
NAME plannerTest
COMMAND plannerTest
)
......@@ -85,8 +85,9 @@ class PlannerTestBaseImpl {
public:
PlannerTestBaseImpl() : sqlNo_(0) {}
void useDb(const string& acctId, const string& db) {
caseEnv_.acctId_ = acctId;
void useDb(const string& user, const string& db) {
caseEnv_.acctId_ = 0;
caseEnv_.user_ = user;
caseEnv_.db_ = db;
caseEnv_.nsql_ = g_skipSql;
}
......@@ -193,7 +194,8 @@ class PlannerTestBaseImpl {
private:
struct caseEnv {
string acctId_;
int32_t acctId_;
string user_;
string db_;
int32_t nsql_;
......@@ -295,7 +297,7 @@ class PlannerTestBaseImpl {
transform(stmtEnv_.sql_.begin(), stmtEnv_.sql_.end(), stmtEnv_.sql_.begin(), ::tolower);
SParseContext cxt = {0};
cxt.acctId = atoi(caseEnv_.acctId_.c_str());
cxt.acctId = caseEnv_.acctId_;
cxt.db = caseEnv_.db_.c_str();
cxt.pSql = stmtEnv_.sql_.c_str();
cxt.sqlLen = stmtEnv_.sql_.length();
......@@ -319,12 +321,13 @@ class PlannerTestBaseImpl {
void doParseBoundSql(SQuery* pQuery) {
SParseContext cxt = {0};
cxt.acctId = atoi(caseEnv_.acctId_.c_str());
cxt.acctId = caseEnv_.acctId_;
cxt.db = caseEnv_.db_.c_str();
cxt.pSql = stmtEnv_.sql_.c_str();
cxt.sqlLen = stmtEnv_.sql_.length();
cxt.pMsg = stmtEnv_.msgBuf_.data();
cxt.msgLen = stmtEnv_.msgBuf_.max_size();
cxt.pUser = caseEnv_.user_.c_str();
DO_WITH_THROW(qStmtParseQuerySql, &cxt, pQuery);
res_.ast_ = toString(pQuery->pRoot);
......@@ -364,6 +367,7 @@ class PlannerTestBaseImpl {
void setPlanContext(SQuery* pQuery, SPlanContext* pCxt) {
pCxt->queryId = 1;
pCxt->pUser = caseEnv_.user_.c_str();
if (QUERY_NODE_CREATE_TOPIC_STMT == nodeType(pQuery->pRoot)) {
pCxt->pAstRoot = ((SCreateTopicStmt*)pQuery->pRoot)->pQuery;
pCxt->topicQuery = true;
......@@ -403,7 +407,7 @@ PlannerTestBase::PlannerTestBase() : impl_(new PlannerTestBaseImpl()) {}
PlannerTestBase::~PlannerTestBase() {}
void PlannerTestBase::useDb(const std::string& acctId, const std::string& db) { impl_->useDb(acctId, db); }
void PlannerTestBase::useDb(const std::string& user, const std::string& db) { impl_->useDb(user, db); }
void PlannerTestBase::run(const std::string& sql) { return impl_->run(sql); }
......
......@@ -30,7 +30,7 @@ class PlannerTestBase : public testing::Test {
PlannerTestBase();
virtual ~PlannerTestBase();
void useDb(const std::string& acctId, const std::string& db);
void useDb(const std::string& user, const std::string& db);
void run(const std::string& sql);
// stmt mode APIs
void prepare(const std::string& sql);
......
......@@ -144,6 +144,23 @@ int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
return TSDB_CODE_SUCCESS;
}
int32_t queryBuildGetSerVerMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
SServerVerReq req = {0};
int32_t bufLen = tSerializeSServerVerReq(NULL, 0, &req);
void *pBuf = (*mallcFp)(bufLen);
tSerializeSServerVerReq(pBuf, bufLen, &req);
*msg = pBuf;
*msgLen = bufLen;
return TSDB_CODE_SUCCESS;
}
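
queryBuildGetSerVerMsg follows the usual two-pass serialization idiom: call the serializer with a NULL buffer to learn the required length, allocate, then serialize into the buffer. A toy version of the idiom is sketched below (the encoder is a stand-in, not the tSerializeSServerVerReq API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy encoder: with buf == NULL it only reports the required length. */
static int encodeReq(char *buf, int bufLen, int payload) {
  int need = (int)sizeof(int);
  if (buf == NULL) return need;        /* first pass: measure */
  if (bufLen < need) return -1;
  memcpy(buf, &payload, sizeof(int));  /* second pass: encode */
  return need;
}

int main(void) {
  int   len = encodeReq(NULL, 0, 7);   /* measure */
  char *msg = malloc(len);             /* allocate */
  encodeReq(msg, len, 7);              /* serialize */
  printf("encoded %d bytes\n", len);
  free(msg);
  return 0;
}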
int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
......@@ -467,6 +484,26 @@ int32_t queryProcessDnodeListRsp(void *output, char *msg, int32_t msgSize) {
return code;
}
int32_t queryProcessGetSerVerRsp(void *output, char *msg, int32_t msgSize) {
SServerVerRsp out = {0};
int32_t code = 0;
if (NULL == output || NULL == msg || msgSize <= 0) {
code = TSDB_CODE_TSC_INVALID_INPUT;
return code;
}
if (tDeserializeSServerVerRsp(msg, msgSize, &out) != 0) {
qError("invalid svr ver rsp msg, msgSize:%d", msgSize);
code = TSDB_CODE_INVALID_MSG;
return code;
}
*(char**)output = strdup(out.ver);
return code;
}
int32_t queryProcessGetDbCfgRsp(void *output, char *msg, int32_t msgSize) {
SDbCfgRsp out = {0};
......@@ -583,6 +620,7 @@ void initQueryModuleMsgHandle() {
queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_TABLE_INDEX)] = queryBuildGetTbIndexMsg;
queryBuildMsg[TMSG_INDEX(TDMT_VND_TABLE_CFG)] = queryBuildGetTbCfgMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_TABLE_CFG)] = queryBuildGetTbCfgMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_SERVER_VERSION)] = queryBuildGetSerVerMsg;
queryProcessMsgRsp[TMSG_INDEX(TDMT_VND_TABLE_META)] = queryProcessTableMetaRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryProcessTableMetaRsp;
......@@ -596,6 +634,7 @@ void initQueryModuleMsgHandle() {
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_TABLE_INDEX)] = queryProcessGetTbIndexRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_VND_TABLE_CFG)] = queryProcessGetTbCfgRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_TABLE_CFG)] = queryProcessGetTbCfgRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_SERVER_VERSION)] = queryProcessGetSerVerRsp;
}
#pragma GCC diagnostic pop
......@@ -70,20 +70,21 @@ int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* p
if (tEncodeI32(pEncoder, pReq->dstTaskId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->srcNodeId) < 0) return -1;
if (tEncodeI32(pEncoder, pReq->srcTaskId) < 0) return -1;
if (tEncodeBinary(pEncoder, (const uint8_t*)&pReq->pRetrieve, pReq->retrieveLen) < 0) return -1;
if (tEncodeBinary(pEncoder, (const uint8_t*)pReq->pRetrieve, pReq->retrieveLen) < 0) return -1;
tEndEncode(pEncoder);
return pEncoder->pos;
}
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq) {
int32_t tlen = 0;
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->dstNodeId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->dstTaskId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->srcNodeId) < 0) return -1;
if (tDecodeI32(pDecoder, &pReq->srcTaskId) < 0) return -1;
if (tDecodeBinary(pDecoder, (uint8_t**)&pReq->pRetrieve, &pReq->retrieveLen) < 0) return -1;
uint64_t len = 0;
if (tDecodeBinaryAlloc(pDecoder, (void**)&pReq->pRetrieve, &len) < 0) return -1;
pReq->retrieveLen = len;
tEndDecode(pDecoder);
return 0;
}
......
......@@ -105,9 +105,16 @@ int32_t syncNodeElect(SSyncNode* pSyncNode) {
}
int32_t syncNodeRequestVote(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncRequestVote* pMsg) {
sTrace("syncNodeRequestVote pSyncNode:%p ", pSyncNode);
int32_t ret = 0;
do {
char host[128];
uint16_t port;
syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
sDebug("vgId:%d, send sync-request-vote to %s:%d, {term:%lu, last-index:%ld, last-term:%lu}", pSyncNode->vgId, host,
port, pMsg->term, pMsg->lastLogTerm, pMsg->lastLogIndex);
} while (0);
SRpcMsg rpcMsg;
syncRequestVote2RpcMsg(pMsg, &rpcMsg);
syncNodeSendMsgById(destRaftId, pSyncNode, &rpcMsg);
......
......@@ -370,6 +370,15 @@ bool syncIsReady(int64_t rid) {
ASSERT(rid == pSyncNode->rid);
bool b = (pSyncNode->state == TAOS_SYNC_STATE_LEADER) && pSyncNode->restoreFinish;
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
// if false, set error code
if (false == b) {
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
terrno = TSDB_CODE_SYN_NOT_LEADER;
} else {
terrno = TSDB_CODE_APP_NOT_READY;
}
}
return b;
}
......@@ -480,12 +489,30 @@ void syncGetEpSet(int64_t rid, SEpSet* pEpSet) {
snprintf(pEpSet->eps[i].fqdn, sizeof(pEpSet->eps[i].fqdn), "%s", (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodeFqdn);
pEpSet->eps[i].port = (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodePort;
(pEpSet->numOfEps)++;
sInfo("syncGetEpSet index:%d %s:%d", i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
sInfo("vgId:%d sync get epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
}
pEpSet->inUse = pSyncNode->pRaftCfg->cfg.myIndex;
sInfo("vgId:%d sync get epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
sInfo("syncGetEpSet pEpSet->inUse:%d ", pEpSet->inUse);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
memset(pEpSet, 0, sizeof(*pEpSet));
return;
}
ASSERT(rid == pSyncNode->rid);
pEpSet->numOfEps = 0;
for (int i = 0; i < pSyncNode->pRaftCfg->cfg.replicaNum; ++i) {
snprintf(pEpSet->eps[i].fqdn, sizeof(pEpSet->eps[i].fqdn), "%s", (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodeFqdn);
pEpSet->eps[i].port = (pSyncNode->pRaftCfg->cfg.nodeInfo)[i].nodePort;
(pEpSet->numOfEps)++;
sInfo("vgId:%d sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn, pEpSet->eps[i].port);
}
pEpSet->inUse = (pSyncNode->pRaftCfg->cfg.myIndex + 1) % pEpSet->numOfEps;
sInfo("vgId:%d sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
}
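
Compared with syncGetEpSet, the retry variant above starts inUse at the peer after the local node, so a failed request is first retried against a different replica. The starting index is just modular arithmetic, e.g.:

#include <stdio.h>

int main(void) {
  int replicaNum = 3;
  int myIndex = 2;                           /* the local node is the last replica */
  int inUse = (myIndex + 1) % replicaNum;    /* retries start at the next peer */
  printf("first retry target: %d\n", inUse); /* prints 0 */
  return 0;
}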
......
......@@ -224,8 +224,8 @@ int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, c
uint16_t port;
syncUtilU642Addr(destRaftId->addr, host, sizeof(host), &port);
sDebug(
"vgId:%d, send sync-append-entries to %s:%d, term:%lu, pre-index:%ld, pre-term:%lu, pterm:%lu, commit:%ld, "
"datalen:%d",
"vgId:%d, send sync-append-entries to %s:%d, {term:%lu, pre-index:%ld, pre-term:%lu, pterm:%lu, commit:%ld, "
"datalen:%d}",
pSyncNode->vgId, host, port, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->privateTerm,
pMsg->commitIndex, pMsg->dataLen);
} while (0);
......
......@@ -99,15 +99,20 @@ static bool syncNodeOnRequestVoteLogOK(SSyncNode* pSyncNode, SyncRequestVote* pM
int32_t syncNodeOnRequestVoteSnapshotCb(SSyncNode* ths, SyncRequestVote* pMsg) {
int32_t ret = 0;
// print log
char logBuf[128] = {0};
snprintf(logBuf, sizeof(logBuf), "recv SyncRequestVote, currentTerm:%lu", ths->pRaftStore->currentTerm);
syncRequestVoteLog2(logBuf, pMsg);
// if already drop replica, do not process
if (!syncNodeInRaftGroup(ths, &(pMsg->srcId)) && !ths->pRaftCfg->isStandBy) {
sInfo("recv SyncRequestVote maybe replica already dropped");
return ret;
do {
char logBuf[256];
char host[64];
uint16_t port;
syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
snprintf(logBuf, sizeof(logBuf),
"recv sync-request-vote from %s:%d, term:%lu, lindex:%ld, lterm:%lu, maybe replica already dropped",
host, port, pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm);
syncNodeEventLog(ths, logBuf);
} while (0);
return -1;
}
// maybe update term
......@@ -135,10 +140,22 @@ int32_t syncNodeOnRequestVoteSnapshotCb(SSyncNode* ths, SyncRequestVote* pMsg) {
pReply->term = ths->pRaftStore->currentTerm;
pReply->voteGranted = grant;
// trace log
do {
char logBuf[256];
char host[64];
uint16_t port;
syncUtilU642Addr(pMsg->srcId.addr, host, sizeof(host), &port);
snprintf(logBuf, sizeof(logBuf),
"recv sync-request-vote from %s:%d, term:%lu, lindex:%ld, lterm:%lu, reply-grant:%d", host, port,
pMsg->term, pMsg->lastLogIndex, pMsg->lastLogTerm, pReply->voteGranted);
syncNodeEventLog(ths, logBuf);
} while (0);
SRpcMsg rpcMsg;
syncRequestVoteReply2RpcMsg(pReply, &rpcMsg);
syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg);
syncRequestVoteReplyDestroy(pReply);
return ret;
return 0;
}
\ No newline at end of file
......@@ -125,6 +125,7 @@ typedef struct {
STransMsg* pRsp; // for synchronous API
tsem_t* pSem; // for synchronous API
SCvtAddr cvtAddr;
bool setMaxRetry;
int hThrdIdx;
} STransConnCtx;
......
......@@ -980,7 +980,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
tTrace("try to send req to next node");
pMsg->st = taosGetTimestampUs();
pCtx->retryCount += 1;
if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
if (pResp->code == TSDB_CODE_RPC_NETWORK_UNAVAIL && pCtx->setMaxRetry == false) {
if (pCtx->retryCount < pEpSet->numOfEps * 3) {
pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
if (pThrd->quit == false) {
......@@ -997,6 +997,7 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
}
}
} else if (pCtx->retryCount < TRANS_RETRY_COUNT_LIMIT) {
pCtx->setMaxRetry = true;
if (pResp->contLen == 0) {
pEpSet->inUse = (++pEpSet->inUse) % pEpSet->numOfEps;
transPrintEpSet(&pCtx->epSet);
......@@ -1012,8 +1013,10 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
pCtx->retryCount + 1, TRANS_RETRY_COUNT_LIMIT);
}
if (pThrd->quit == false) {
if (pConn->status != ConnInPool) {
addConnToPool(pThrd->pool, pConn);
if (pResp->code != TSDB_CODE_RPC_NETWORK_UNAVAIL) {
if (pConn->status != ConnInPool) addConnToPool(pThrd->pool, pConn);
} else {
transUnrefCliHandle(pConn);
}
STaskArg* arg = taosMemoryMalloc(sizeof(STaskArg));
arg->param1 = pMsg;
......
......@@ -23,7 +23,7 @@ int32_t walRestoreFromSnapshot(SWal *pWal, int64_t ver) {
void *pIter = NULL;
while (1) {
taosHashIterate(pWal->pRefHash, pIter);
pIter = taosHashIterate(pWal->pRefHash, pIter);
if (pIter == NULL) break;
SWalRef *pRef = (SWalRef *)pIter;
if (pRef->ver != -1) {
......@@ -309,7 +309,7 @@ static int walWriteIndex(SWal *pWal, int64_t ver, int64_t offset) {
SWalIdxEntry entry = {.ver = ver, .offset = offset};
/*int64_t idxOffset = taosLSeekFile(pWal->pWriteIdxTFile, 0, SEEK_CUR);*/
/*wDebug("write index: ver: %ld, offset: %ld, at %ld", ver, offset, idxOffset);*/
int size = taosWriteFile(pWal->pWriteIdxTFile, &entry, sizeof(SWalIdxEntry));
int64_t size = taosWriteFile(pWal->pWriteIdxTFile, &entry, sizeof(SWalIdxEntry));
if (size != sizeof(SWalIdxEntry)) {
terrno = TAOS_SYSTEM_ERROR(errno);
// TODO truncate
......
......@@ -8,11 +8,12 @@ function usage() {
echo -e "\t -l log dir"
echo -e "\t -e enterprise edition"
echo -e "\t -o default timeout value"
echo -e "\t -w log web server"
echo -e "\t -h help"
}
ent=0
while getopts "m:t:b:l:o:eh" opt; do
while getopts "m:t:b:l:o:w:eh" opt; do
case $opt in
m)
config_file=$OPTARG
......@@ -32,6 +33,9 @@ while getopts "m:t:b:l:o:eh" opt; do
o)
timeout_param="-o $OPTARG"
;;
w)
web_server=$OPTARG
;;
h)
usage
exit 0
......@@ -64,10 +68,11 @@ if [ ! -f $t_file ]; then
exit 1
fi
date_tag=`date +%Y%m%d-%H%M%S`
test_log_dir=${branch}_${date_tag}
if [ -z $log_dir ]; then
log_dir="log/${branch}_${date_tag}"
log_dir="log/${test_log_dir}"
else
log_dir="$log_dir/${branch}_${date_tag}"
log_dir="$log_dir/${test_log_dir}"
fi
hosts=()
......@@ -190,44 +195,54 @@ function run_thread() {
# echo "$thread_no $count $cmd"
local ret=0
local redo_count=1
local case_log_file=$log_dir/${case_file}.txt
start_time=`date +%s`
local case_index=`flock -x $lock_file -c "sh -c \"echo \\\$(( \\\$( cat $index_file ) + 1 )) | tee $index_file\""`
case_index=`printf "%5d" $case_index`
local case_info=`echo "$line"|cut -d, -f 3,4`
while [ ${redo_count} -lt 6 ]; do
if [ -f $log_dir/$case_file.log ]; then
cp $log_dir/$case_file.log $log_dir/$case_file.${redo_count}.redolog
if [ -f $case_log_file ]; then
cp $case_log_file $log_dir/$case_file.${redo_count}.redotxt
fi
echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$log_dir/$case_file.log
echo -e "\e[33m >>>>> \e[0m ${case_cmd}"
date >>$log_dir/$case_file.log
# $cmd 2>&1 | tee -a $log_dir/$case_file.log
echo "${hosts[index]}-${thread_no} order:${count}, redo:${redo_count} task:${line}" >$case_log_file
local current_time=`date "+%Y-%m-%d %H:%M:%S"`
echo -e "$case_index \e[33m START >>>>> \e[0m ${case_info} \e[33m[$current_time]\e[0m"
echo "$current_time" >>$case_log_file
local real_start_time=`date +%s`
# $cmd 2>&1 | tee -a $case_log_file
# ret=${PIPESTATUS[0]}
$cmd >>$log_dir/$case_file.log 2>&1
$cmd >>$case_log_file 2>&1
ret=$?
echo "${hosts[index]} `date` ret:${ret}" >>$log_dir/$case_file.log
local real_end_time=`date +%s`
local time_elapsed=$(( real_end_time - real_start_time ))
echo "execute time: ${time_elapsed}s" >>$case_log_file
current_time=`date "+%Y-%m-%d %H:%M:%S"`
echo "${hosts[index]} $current_time exit code:${ret}" >>$case_log_file
if [ $ret -eq 0 ]; then
break
fi
redo=0
grep -q "wait too long for taosd start" $log_dir/$case_file.log
grep -q "wait too long for taosd start" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "kex_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
grep -q "kex_exchange_identification: Connection closed by remote host" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "ssh_exchange_identification: Connection closed by remote host" $log_dir/$case_file.log
grep -q "ssh_exchange_identification: Connection closed by remote host" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "kex_exchange_identification: read: Connection reset by peer" $log_dir/$case_file.log
grep -q "kex_exchange_identification: read: Connection reset by peer" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "Database not ready" $log_dir/$case_file.log
grep -q "Database not ready" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
grep -q "Unable to establish connection" $log_dir/$case_file.log
grep -q "Unable to establish connection" $case_log_file
if [ $? -eq 0 ]; then
redo=1
fi
......@@ -240,11 +255,18 @@ function run_thread() {
redo_count=$(( redo_count + 1 ))
done
end_time=`date +%s`
echo >>$log_dir/$case_file.log
echo "${hosts[index]} execute time: $(( end_time - start_time ))s" >>$log_dir/$case_file.log
echo >>$case_log_file
total_time=$(( end_time - start_time ))
echo "${hosts[index]} total time: ${total_time}s" >>$case_log_file
# echo "$thread_no ${line} DONE"
if [ $ret -ne 0 ]; then
flock -x $lock_file -c "echo \"${hosts[index]} ret:${ret} ${line}\" >>$log_dir/failed.log"
if [ $ret -eq 0 ]; then
echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[32m success\e[0m"
else
if [ ! -z ${web_server} ]; then
flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n ${web_server}/$test_log_dir/${case_file}.txt\" >>${failed_case_file}"
else
flock -x $lock_file -c "echo -e \"${hosts[index]} ret:${ret} ${line}\n log file: ${case_log_file}\" >>${failed_case_file}"
fi
mkdir -p $log_dir/${case_file}.coredump
local remote_coredump_dir="${workdirs[index]}/tmp/thread_volume/$thread_no/coredump"
local scpcmd="sshpass -p ${passwords[index]} scp -o StrictHostKeyChecking=no -r ${usernames[index]}@${hosts[index]}"
......@@ -253,13 +275,15 @@ function run_thread() {
fi
cmd="$scpcmd:${remote_coredump_dir}/* $log_dir/${case_file}.coredump/"
$cmd # 2>/dev/null
local case_info=`echo "$line"|cut -d, -f 3,4`
local corefile=`ls $log_dir/${case_file}.coredump/`
echo -e "$case_info \e[31m failed\e[0m"
echo -e "$case_index \e[34m DONE <<<<< \e[0m ${case_info} \e[34m[${total_time}s]\e[0m \e[31m failed\e[0m"
echo "=========================log============================"
cat $log_dir/$case_file.log
cat $case_log_file
echo "====================================================="
echo -e "\e[34m log file: $log_dir/$case_file.log \e[0m"
echo -e "\e[34m log file: $case_log_file \e[0m"
if [ ! -z "${web_server}" ]; then
echo "${web_server}/$test_log_dir/${case_file}.txt"
fi
if [ ! -z "$corefile" ]; then
echo -e "\e[34m corefiles: $corefile \e[0m"
local build_dir=$log_dir/build_${hosts[index]}
......@@ -325,6 +349,10 @@ mkdir -p $log_dir
rm -rf $log_dir/*
task_file=$log_dir/$$.task
lock_file=$log_dir/$$.lock
index_file=$log_dir/case_index.txt
stat_file=$log_dir/stat.txt
failed_case_file=$log_dir/failed.txt
echo "0" >$index_file
i=0
j=0
......@@ -350,15 +378,45 @@ rm -f $lock_file
rm -f $task_file
# docker ps -a|grep -v CONTAINER|awk '{print $1}'|xargs docker rm -f
echo "====================================================================="
echo "log dir: $log_dir"
total_cases=`cat $index_file`
failed_cases=0
if [ -f $failed_case_file ]; then
if [ ! -z "$web_server" ]; then
failed_cases=`grep -v "$web_server" $failed_case_file|wc -l`
else
failed_cases=`grep -v "log file:" $failed_case_file|wc -l`
fi
fi
success_cases=$(( total_cases - failed_cases ))
echo "Total Cases: $total_cases" >$stat_file
echo "Successful: $success_cases" >>$stat_file
echo "Failed: $failed_cases" >>$stat_file
cat $stat_file
RET=0
i=1
if [ -f "$log_dir/failed.log" ]; then
if [ -f "${failed_case_file}" ]; then
echo "====================================================="
while read line; do
if [ ! -z "${web_server}" ]; then
echo "$line"|grep -q "${web_server}"
if [ $? -eq 0 ]; then
echo " $line"
continue
fi
else
echo "$line"|grep -q "log file:"
if [ $? -eq 0 ]; then
echo " $line"
continue
fi
fi
line=`echo "$line"|cut -d, -f 3,4`
echo -e "$i. $line \e[31m failed\e[0m" >&2
i=$(( i + 1 ))
done <$log_dir/failed.log
done <${failed_case_file}
RET=1
fi
......
......@@ -15,7 +15,7 @@ from util.sql import tdSql
class TDSetSql:
def init(self, conn, logSql):
tdSql.init(conn.cursor(), logSql)
self.stbname = 'stb'
def set_create_normaltable_sql(self, ntbname='ntb',
......
......@@ -90,6 +90,7 @@
./test.sh -f tsim/stream/triggerInterval0.sim
# ./test.sh -f tsim/stream/triggerSession0.sim
./test.sh -f tsim/stream/partitionby.sim
./test.sh -f tsim/stream/partitionby1.sim
./test.sh -f tsim/stream/schedSnode.sim
./test.sh -f tsim/stream/windowClose.sim
......
......@@ -16,12 +16,12 @@ rem echo NODE: %NODE%
set SCRIPT_DIR=%~dp0..\
rem echo SCRIPT_DIR: %SCRIPT_DIR%
set BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\
echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\"
set TSIM=%BUILD_DIR%tsim
rem echo BUILD_DIR: %BUILD_DIR%
rem echo TSIM: %TSIM%
set SIM_DIR=%SCRIPT_DIR%..\..\..\sim\
echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\"
rem echo SIM_DIR: %SIM_DIR%
set NODE_DIR=%SIM_DIR%%NODE_NAME%\
......
......@@ -4,7 +4,7 @@ echo Executing copy_udf.bat
set SCRIPT_DIR=%cd%
echo SCRIPT_DIR: %SCRIPT_DIR%
cd ..\..\..
echo %cd% | grep community > nul && cd ..\..\.. || cd ..\..
set TAOS_DIR=%cd%
echo find udf library in %TAOS_DIR%
set UDF1_DIR=%TAOS_DIR%\debug\build\lib\udf1.dll
......
......@@ -13,12 +13,12 @@ rem echo NODE: %NODE%
set SCRIPT_DIR=%~dp0..\
rem echo SCRIPT_DIR: %SCRIPT_DIR%
set BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\
echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\"
set TSIM=%BUILD_DIR%tsim
rem echo BUILD_DIR: %BUILD_DIR%
rem echo TSIM: %TSIM%
set SIM_DIR=%SCRIPT_DIR%..\..\..\sim\
echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\"
rem echo SIM_DIR: %SIM_DIR%
set NODE_DIR=%SIM_DIR%%NODE_NAME%\
......
......@@ -13,12 +13,12 @@ rem echo NODE: %EXEC_OPTON%
set SCRIPT_DIR=%~dp0..\
rem echo SCRIPT_DIR: %SCRIPT_DIR%
set BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\
echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\"
set TAOSD=%BUILD_DIR%taosd
rem echo BUILD_DIR: %BUILD_DIR%
rem echo TAOSD: %TAOSD%
set SIM_DIR=%SCRIPT_DIR%..\..\..\sim\
echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\"
rem echo SIM_DIR: %SIM_DIR%
set NODE_DIR=%SIM_DIR%%NODE_NAME%\
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sleep 50
sql connect
sql create database test vgroups 4;
sql use test;
sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
sql create table ts2 using st tags(2,2,2);
sql create table ts3 using st tags(3,2,2);
sql create table ts4 using st tags(4,2,2);
sql create stream stream_t1 trigger at_once into streamtST1 as select _wstartts, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st partition by tbname interval(10s);
sql insert into ts1 values(1648791213001,1,12,3,1.0);
sql insert into ts2 values(1648791213001,1,12,3,1.0);
sql insert into ts3 values(1648791213001,1,12,3,1.0);
sql insert into ts4 values(1648791213001,1,12,3,1.0);
$loop_count = 0
loop0:
sleep 300
sql select * from streamtST1;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 4 then
print =====rows=$rows
goto loop0
endi
print =====loop0
sql create database test1 vgroups 1;
sql use test1;
sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,2,3);
sql create table ts2 using st tags(1,3,4);
sql create table ts3 using st tags(1,4,5);
sql create stream streams1 trigger at_once into streamt as select _wstartts, count(*) c1, count(a) c2 from st partition by tbname interval(10s);
sql insert into ts1 values(1648791211000,1,2,3);
sql insert into ts2 values(1648791211000,1,2,3);
$loop_count = 0
loop1:
sleep 300
sql select * from streamt;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $rows != 2 then
print =====rows=$rows
goto loop1
endi
print =====loop1
sql create database test2 vgroups 1;
sql use test2;
sql create stable st(ts timestamp,a int,b int,c int,id int) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
sql create table ts2 using st tags(2,2,2);
sql create stream stream_t2 trigger at_once into streamtST as select _wstartts, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6, max(id) c7 from st partition by tbname interval(10s) ;
sql insert into ts1 values(1648791211000,1,2,3,1);
sql insert into ts1 values(1648791222001,2,2,3,2);
sql insert into ts2 values(1648791211000,1,2,3,3);
sql insert into ts2 values(1648791222001,2,2,3,4);
sql insert into ts2 values(1648791222002,2,2,3,5);
sql insert into ts2 values(1648791222002,2,2,3,6);
sql insert into ts1 values(1648791211000,1,2,3,1);
sql insert into ts1 values(1648791222001,2,2,3,2);
sql insert into ts2 values(1648791211000,1,2,3,3);
sql insert into ts2 values(1648791222001,2,2,3,4);
$loop_count = 0
loop2:
sleep 300
sql select * from streamtST;
$loop_count = $loop_count + 1
if $loop_count == 10 then
return -1
endi
if $data01 != 1 then
print =====data01=$data01
goto loop2
endi
if $data02 != 1 then
print =====data02=$data02
goto loop2
endi
if $data03 != 1 then
print =====data03=$data03
goto loop2
endi
if $data04 != 2 then
print =====data04=$data04
goto loop2
endi
print =====loop2
system sh/stop_dnodes.sh
......@@ -197,22 +197,22 @@ print $switch_loop_cnt
if $switch_loop_cnt == 1 then
sql show vgroups
$dnodeId = $data[1][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 2 then
sql show vgroups
$dnodeId = $data[2][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 3 then
sql show vgroups
$dnodeId = $data[3][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 4 then
sql show vgroups
$dnodeId = $data[4][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
else
......
......@@ -290,22 +290,22 @@ print $switch_loop_cnt
if $switch_loop_cnt == 1 then
sql show vgroups
$dnodeId = $data[1][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 2 then
sql show vgroups
$dnodeId = $data[2][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 3 then
sql show vgroups
$dnodeId = $data[3][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 4 then
sql show vgroups
$dnodeId = $data[4][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
else
......
......@@ -227,22 +227,22 @@ print $switch_loop_cnt
if $switch_loop_cnt == 1 then
sql show vgroups
$dnodeId = $data[1][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 2 then
sql show vgroups
$dnodeId = $data[2][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 3 then
sql show vgroups
$dnodeId = $data[3][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
elif $switch_loop_cnt == 4 then
sql show vgroups
$dnodeId = $data[4][3]
$dnodeId = $data[0][3]
$dnodeId = dnode . $dnodeId
goto switch_leader_to_offine_loop
else
......
@echo off
set EXEC_OPTON=start
set DB_NAME=db
set CDB_NAME=db
set /a POLL_DELAY=5
set /a VALGRIND=0
set SIGNAL=SIGINT
set /a SHOW_MSG=0
set /a SHOW_ROW=0
set /a EXP_USE_SNAPSHOT=0
:param
if "%1"=="" (
goto :end
)
if %1 == -d ( set "DB_NAME=%2" && shift && shift && goto :param )
if %1 == -g ( set "SHOW_MSG=%2" && shift && shift && goto :param )
if %1 == -r ( set "SHOW_ROW=%2" && shift && shift && goto :param )
if %1 == -s ( set "EXEC_OPTON=%2" && shift && shift && goto :param )
if %1 == -v ( set "VALGRIND=1" && shift && goto :param )
if %1 == -y ( set "POLL_DELAY=%2" && shift && shift && goto :param )
if %1 == -x ( set "SIGNAL=%2" && shift && shift && goto :param )
if %1 == -w ( set "CDB_NAME=%2" && shift && shift && goto :param )
if %1 == -e ( set "EXP_USE_SNAPSHOT=%2" && shift && shift && goto :param )
echo unknown argument %1
goto :eof
:end
echo EXEC_OPTON %EXEC_OPTON%
echo DB_NAME %DB_NAME%
echo CDB_NAME %CDB_NAME%
echo POLL_DELAY %POLL_DELAY%
echo VALGRIND %VALGRIND%
echo SIGNAL %SIGNAL%
echo SHOW_MSG %SHOW_MSG%
echo SHOW_ROW %SHOW_ROW%
echo EXP_USE_SNAPSHOT %EXP_USE_SNAPSHOT%
echo %cd% | grep community > nul && cd ..\..\.. || cd ..\..
set BUILD_DIR=%cd%\debug\build\bin
set SIM_DIR=%cd%\sim
set PRG_DIR=%SIM_DIR%\tsim
set CFG_DIR=%PRG_DIR%\cfg
set LOG_DIR=%PRG_DIR%\log
set PROGRAM=%BUILD_DIR%\tmq_sim.exe
echo ------------------------------------------------------------------------
echo BUILD_DIR : %BUILD_DIR%
echo SIM_DIR : %SIM_DIR%
echo CFG_DIR : %CFG_DIR%
echo PRG_DIR : %PRG_DIR%
echo CFG_DIR : %CFG_DIR%
echo LOG_DIR : %LOG_DIR%
echo PROGRAM : %PROGRAM%
echo POLL_DELAY : %POLL_DELAY%
echo DB_NAME : %DB_NAME%
echo ------------------------------------------------------------------------
if "%EXEC_OPTON%" == "start" (
echo mintty -h never %PROGRAM% -c %CFG_DIR% -y %POLL_DELAY% -d %DB_NAME% -g %SHOW_MSG% -r %SHOW_ROW% -w %CDB_NAME% -e %EXP_USE_SNAPSHOT%
mintty -h never %PROGRAM% -c %CFG_DIR% -y %POLL_DELAY% -d %DB_NAME% -g %SHOW_MSG% -r %SHOW_ROW% -w %CDB_NAME% -e %EXP_USE_SNAPSHOT%
) else (
if "%SIGNAL%" == "SIGKILL" ( ps | grep tmq_sim | awk '{print $2}' | xargs kill -9 ) else ( ps | grep tmq_sim | awk '{print $2}' | xargs kill -SIGINT )
)
goto :eof
......@@ -6,12 +6,12 @@ rem echo Start TDengine Testing Case ...
set "SCRIPT_DIR=%~dp0"
rem echo SCRIPT_DIR: %SCRIPT_DIR%
set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\"
echo %cd% | grep community > nul && set "BUILD_DIR=%SCRIPT_DIR%..\..\..\debug\build\bin\" || set "BUILD_DIR=%SCRIPT_DIR%..\..\debug\build\bin\"
set "TSIM=%BUILD_DIR%tsim"
rem echo BUILD_DIR: %BUILD_DIR%
rem echo TSIM: %TSIM%
set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\"
echo %cd% | grep community > nul && set "SIM_DIR=%SCRIPT_DIR%..\..\..\sim\" || set "SIM_DIR=%SCRIPT_DIR%..\..\sim\"
rem echo SIM_DIR: %SIM_DIR%
set "TSIM_DIR=%SIM_DIR%tsim\"
......
......@@ -44,13 +44,13 @@ def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key
tdLog.info ("taos cmd: %s" % taosCmd)
child = taosExpect.spawn(taosCmd, timeout=3)
child = taosExpect.spawn(taosCmd, timeout=10)
#output = child.readline()
#print (output.decode())
if len(expectString) != 0:
i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
i = child.expect([expectString, taosExpect.TIMEOUT, taosExpect.EOF], timeout=10)
else:
i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=6)
i = child.expect([taosExpect.TIMEOUT, taosExpect.EOF], timeout=10)
if platform.system().lower() == 'windows':
retResult = child.before
......
This diff has been collapsed.
......@@ -12,7 +12,37 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
# name of normal table
self.ntbname = 'ntb'
# name of stable
self.stbname = 'stb'
# structure of column
self.column_dict = {
'ts':'timestamp',
'c1':'int',
'c2':'float',
'c3':'binary(20)',
'c4':'nchar(20)'
}
# structure of tag
self.tag_dict = {
't0':'int'
}
# number of child tables
self.tbnum = 2
# values of tag,the number of values should equal to tbnum
self.tag_values = [
f'10',
f'100'
]
# values of rows, structure should be same as column
self.values_list = [
f'now,10,99.99,"2020-1-1 00:00:00"',
f'today(),100,11.111,22.222222'
]
self.error_param = [1,'now()']
def run(self): # sourcery skip: extract-duplicate-method
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create tables==========")
......
......@@ -60,10 +60,10 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.checkData(0, 1, None)
#!bug TD-16561
# for i in ['stb','db.stb']:
# tdSql.query(f"select first(*) from {i}")
# tdSql.checkRows(1)
# tdSql.checkData(0, 1, None)
for i in ['stb','db.stb']:
tdSql.query(f"select first(*) from {i}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, None)
for i in column_list:
for j in ['stb_1','db.stb_1','stb_1','db.stb_1']:
tdSql.query(f"select first({i}) from {j}")
......@@ -125,10 +125,10 @@ class TDTestCase:
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags('beijing')")
tdSql.execute(f"insert into {stbname}_{i}(ts) values(%d)" % (self.ts - 1-i))
#!bug TD-16561
# for i in [f'{stbname}', f'{dbname}.{stbname}']:
# tdSql.query(f"select first(*) from {i}")
# tdSql.checkRows(1)
# tdSql.checkData(0, 1, None)
for i in [f'{stbname}', f'{dbname}.{stbname}']:
tdSql.query(f"select first(*) from {i}")
tdSql.checkRows(1)
tdSql.checkData(0, 1, None)
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):
......@@ -170,8 +170,8 @@ class TDTestCase:
elif 'nchar' in v:
tdSql.checkData(0, 0, f'{self.nchar_str}1')
#!bug TD-16569
# tdSql.query(f"select first(*),last(*) from {stbname} where ts < 23 interval(1s)")
# tdSql.checkRows(0)
tdSql.query(f"select first(*),last(*) from {stbname} where ts < 23 interval(1s)")
tdSql.checkRows(0)
tdSql.execute(f'drop database {dbname}')
......
import taos
import sys
import datetime
import inspect
from util.log import *
from util.sql import *
from util.cases import *
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
def prepare_tag_datas(self):
# prepare datas
tdSql.execute(
"create database if not exists testdb keep 3650 duration 1000")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
"insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
"insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def function_for_null_data(self):
function_names = ["abs" , "floor" , "ceil" , "round"]
for function_name in function_names:
scalar_sql_1 = f"select {function_name}(c1)/0 from t1 group by c1 order by c1"
scalar_sql_2 = f"select {function_name}(c1/0) from t1 group by c1 order by c1"
scalar_sql_3 = f"select {function_name}(NULL) from t1 group by c1 order by c1"
tdSql.query(scalar_sql_1)
tdSql.checkRows(10)
tdSql.checkData(0,0,None)
tdSql.checkData(9,0,None)
tdSql.query(scalar_sql_2)
tdSql.checkRows(10)
tdSql.checkData(0,0,None)
tdSql.checkData(9,0,None)
tdSql.query(scalar_sql_3)
tdSql.checkRows(10)
tdSql.checkData(0,0,None)
tdSql.checkData(9,0,None)
function_names = ["sin" ,"cos" ,"tan" ,"asin" ,"acos" ,"atan"]
PI = 3.141592654
# sin
tdSql.query(" select sin(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select sin(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select sin(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select sin({PI/2}) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.0)
tdSql.query(" select sin(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.826879541)
# cos
tdSql.query(" select cos(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select cos(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select cos(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.000000000)
tdSql.query(f" select cos({PI}/2) from t1 group by c1 order by c1")
tdSql.query(" select cos(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.562379076)
# tan
tdSql.query(" select tan(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select tan(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select tan(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select tan({PI}/2) from t1 group by c1 order by c1")
tdSql.query(" select tan(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.470324156)
# atan
tdSql.query(" select atan(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select atan(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select atan(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select atan({PI}/2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.003884822)
tdSql.query(" select atan(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.569796327)
# asin
tdSql.query(" select asin(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select asin(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select asin(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,0.000000000)
tdSql.query(f" select asin({PI}/2) from t1 group by c1 order by c1")
tdSql.query(" select asin(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
# acos
tdSql.query(" select acos(c1/0) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select acos(NULL) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select acos(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,1.570796327)
tdSql.query(f" select acos({PI}/2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select acos(1000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
function_names = ["log" ,"pow"]
# log
tdSql.query(" select log(-10) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select log(NULL ,2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select log(c1)/0 from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(f" select log(0.00) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
# pow
tdSql.query(" select pow(c1,10000) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select pow(c1,2)/0 from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(" select pow(NULL,2) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
tdSql.query(f" select pow(c1/0 ,1 ) from t1 group by c1 order by c1")
tdSql.checkData(9,0,None)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_tag_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.function_for_null_data()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
import taos
import sys
import datetime
import inspect
from util.log import *
from util.sql import *
from util.cases import *
import random
class TDTestCase:
updatecfgDict = {'debugFlag': 143, "cDebugFlag": 143, "uDebugFlag": 143, "rpcDebugFlag": 143, "tmrDebugFlag": 143,
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
def insert_datas_and_check_irate(self ,tbnums , rownums , time_step ):
tdLog.info(" prepare datas for auto check irate function ")
tdSql.execute(" create database test ")
tdSql.execute(" use test ")
tdSql.execute(" create stable stb (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
for tbnum in range(tbnums):
tbname = "sub_tb_%d"%tbnum
tdSql.execute(" create table %s using stb tags(%d) "%(tbname , tbnum))
ts = self.ts
for row in range(rownums):
ts = self.ts + time_step*row
c1 = random.randint(0,1000)
c2 = random.randint(0,100000)
c3 = random.randint(0,125)
c4 = random.randint(0,125)
c5 = random.random()/1.0
c6 = random.random()/1.0
c7 = "'true'"
c8 = "'binary_val'"
c9 = "'nchar_val'"
c10 = ts
tdSql.execute(f" insert into {tbname} values ({ts},{c1},{c2},{c3},{c4},{c5},{c6},{c7},{c8},{c9},{c10})")
tdSql.execute("use test")
tbnames = ["stb", "sub_tb_1"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
if coltype[1] in support_types and coltype[-1] != "TAG" :
irate_sql = "select irate({}) from (select * from {} order by tbname ) ".format(colname, tbname)
origin_sql = "select ts , {} , cast(ts as bigint) from (select ts , {} from {} order by ts desc limit 2 offset 0 ) order by ts".format(colname,colname, tbname)
tdSql.query(irate_sql)
irate_result = tdSql.queryResult
tdSql.query(origin_sql)
origin_result = tdSql.queryResult
irate_value = irate_result[0][0]
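# manually recompute irate from the two most recent rows returned by origin_sql:
# rate = delta(value) / delta(timestamp), with timestamps in milliseconds (hence the *1000 to get per-second);
# equal timestamps give 0, and a negative delta is treated as a counter reset, so only the latest value is used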
if origin_result[1][-1] - origin_result[0][-1] == 0:
comput_irate_value = 0
elif (origin_result[1][1] - origin_result[0][1])<0:
comput_irate_value = origin_result[1][1]*1000/( origin_result[1][-1] - origin_result[0][-1])
else:
comput_irate_value = (origin_result[1][1] - origin_result[0][1])*1000/( origin_result[1][-1] - origin_result[0][-1])
if comput_irate_value == irate_value:
tdLog.info(" irate works as expected , sql is %s "% irate_sql)
else:
tdLog.exit(" irate does not work as expected , sql is %s "% irate_sql)
def prepare_tag_datas(self):
# prepare datas
tdSql.execute(
"create database if not exists testdb keep 3650 duration 1000")
tdSql.execute(" use testdb ")
tdSql.execute(
'''create table stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
'''
create table t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
tdSql.execute(
f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
"insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
tdSql.execute(
"insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
tdSql.execute(
"insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
"insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
f'''insert into t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
( '2021-01-01 01:01:06.000', 3, 33333, 333, 33, 3.33, 33.33, 0, "binary3", "nchar3", now()+3a )
( '2021-05-07 01:01:10.000', 4, 44444, 444, 44, 4.44, 44.44, 1, "binary4", "nchar4", now()+4a )
( '2021-07-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2021-09-30 01:01:16.000', 5, 55555, 555, 55, 5.55, 55.55, 0, "binary5", "nchar5", now()+5a )
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
def test_errors(self):
tdSql.execute("use testdb")
error_sql_lists = [
"select irate from t1",
"select irate(-+--+c1) from t1",
# "select +-irate(c1) from t1",
# "select ++-irate(c1) from t1",
# "select ++--irate(c1) from t1",
# "select - -irate(c1)*0 from t1",
# "select irate(tbname+1) from t1 ",
"select irate(123--123)==1 from t1",
"select irate(c1) as 'd1' from t1",
"select irate(c1 ,c2 ) from t1",
"select irate(c1 ,NULL) from t1",
"select irate(,) from t1;",
"select irate(irate(c1) ab from t1)",
"select irate(c1) as int from t1",
"select irate from stb1",
# "select irate(-+--+c1) from stb1",
# "select +-irate(c1) from stb1",
# "select ++-irate(c1) from stb1",
# "select ++--irate(c1) from stb1",
# "select - -irate(c1)*0 from stb1",
# "select irate(tbname+1) from stb1 ",
"select irate(123--123)==1 from stb1",
"select irate(c1) as 'd1' from stb1",
"select irate(c1 ,c2 ) from stb1",
"select irate(c1 ,NULL) from stb1",
"select irate(,) from stb1;",
"select irate(abs(c1) ab from stb1)",
"select irate(c1) as int from stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
def support_types(self):
tdSql.execute("use testdb")
tbnames = ["stb1", "t1", "ct1", "ct2"]
support_types = ["BIGINT", "SMALLINT", "TINYINT", "FLOAT", "DOUBLE", "INT"]
for tbname in tbnames:
tdSql.query("desc {}".format(tbname))
coltypes = tdSql.queryResult
for coltype in coltypes:
colname = coltype[0]
irate_sql = "select irate({}) from {}".format(colname, tbname)
if coltype[1] in support_types:
tdSql.query(irate_sql)
else:
tdSql.error(irate_sql)
def basic_irate_function(self):
# used for empty table , ct3 is empty
tdSql.query("select irate(c1) from ct3")
tdSql.checkRows(0)
tdSql.query("select irate(c2) from ct3")
tdSql.checkRows(0)
# used for regular table
tdSql.query("select irate(c1) from t1")
tdSql.checkData(0, 0, 0.000000386)
# used for sub table
tdSql.query("select irate(abs(c1+c2)) from ct1")
tdSql.checkData(0, 0, 0.000000000)
# mix with common col
tdSql.error("select c1, irate(c1) from ct1")
# mix with common functions
tdSql.error("select irate(c1), abs(c1) from ct4 ")
# agg functions mix with agg functions
tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ")
tdSql.checkData(0, 0, 0.000000000)
tdSql.checkData(1, 0, 0.000000000)
tdSql.checkData(0, 1, 13)
tdSql.checkData(1, 1, 9)
def irate_func_filter(self):
tdSql.execute("use testdb")
tdSql.query(
"select irate(c1+2)/2 from ct4 where c1>5 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 0.000000514)
tdSql.query(
"select irate(c1+c2)/10 from ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 0.000000000)
tdSql.query(
"select irate(c1+c2)/10 from stb1 where c1 = 5 partition by tbname ")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 0.000000000)
def irate_Arithmetic(self):
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
self.prepare_tag_datas()
tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
tdLog.printNoPrefix("==========step4: irate basic query ============")
self.basic_irate_function()
tdLog.printNoPrefix("==========step5: irate filter query ============")
self.irate_func_filter()
tdLog.printNoPrefix("==========step6: check result of query ============")
self.insert_datas_and_check_irate(self.tb_nums,self.row_nums,self.time_step)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
......@@ -2,7 +2,7 @@
from util.log import *
from util.sql import *
from util.cases import *
from util.sqlset import *
import platform
import os
if platform.system().lower() == 'windows':
......@@ -14,10 +14,39 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
self.arithmetic_operators = ['+','-','*','/']
self.arithmetic_values = [0,1,100,15.5]
# name of normal table
self.ntbname = 'ntb'
# name of stable
self.stbname = 'stb'
# structure of column
self.column_dict = {
'ts':'timestamp',
'c1':'int',
'c2':'float',
'c3':'double'
}
# structure of tag
self.tag_dict = {
't0':'int'
}
# number of child tables
self.tbnum = 2
# values of tags; the number of values should equal tbnum
self.tag_values = [
f'10',
f'100'
]
# values of rows; the structure should match the columns
self.values_list = [
f'now,10,99.99,11.111111',
f'today(),100,11.111,22.222222'
def run(self): # sourcery skip: extract-duplicate-method
tdSql.prepare()
# get system timezone
]
self.error_param = [1,'now()']
def get_system_timezone(self):
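# assemble the timezone string the server's timezone() function is expected to return:
# zone name plus UTC offset (tzlocal/strftime on Windows, the date command elsewhere)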
if platform.system().lower() == 'windows':
time_zone_1 = tzlocal.get_localzone_name()
time_zone_2 = time.strftime('(UTC, %z)')
......@@ -32,122 +61,61 @@ class TDTestCase:
time_zone_2 = os.popen('date "+(%Z, %z)"').read().strip()
time_zone = time_zone_1 + " " + time_zone_2
print("expected time zone: " + time_zone)
tdLog.printNoPrefix("==========step1:create tables==========")
tdSql.execute(
'''create table if not exists ntb
(ts timestamp, c1 int, c2 float,c3 double)
'''
)
tdSql.execute(
'''create table if not exists stb
(ts timestamp, c1 int, c2 float,c3 double) tags(t0 int)
'''
)
tdSql.execute(
'''create table if not exists stb_1 using stb tags(100)
'''
)
tdLog.printNoPrefix("==========step2:insert data==========")
tdSql.execute(
"insert into ntb values(now,10,99.99,11.111111)(today(),100,11.111,22.222222)")
tdSql.execute(
"insert into stb_1 values(now,111,99.99,11.111111)(today(),1,11.111,22.222222)")
tdLog.printNoPrefix("==========step3:query data==========")
return time_zone
def tb_type_check(self,tb_type):
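# a query against a super table fans out to every child table, so the expected row
# count is len(values_list) * tbnum; normal and child tables return len(values_list) rows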
if tb_type in ['normal_table','child_table']:
tdSql.checkRows(len(self.values_list))
elif tb_type == 'stable':
tdSql.checkRows(len(self.values_list*self.tbnum))
def data_check(self,timezone,tbname,tb_type):
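# verify timezone() itself, its arithmetic with constants and NULL, the error cases,
# and its use inside a WHERE clause, all against the expected system timezone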
tdSql.query(f"select timezone() from {tbname}")
self.tb_type_check(tb_type)
tdSql.checkData(0,0,timezone)
for symbol in self.arithmetic_operators:
tdSql.query(f"select timezone(){symbol}null from {tbname}")
self.tb_type_check(tb_type)
tdSql.checkData(0,0,None)
for i in self.arithmetic_values:
for symbol in self.arithmetic_operators:
tdSql.query(f"select timezone(){symbol}{i} from {tbname}")
self.tb_type_check(tb_type)
if symbol == '+':
tdSql.checkData(0,0,i)
elif symbol == '-':
tdSql.checkData(0,0,-i)
elif symbol in ['*','/','%']:
if i == 0 and symbol == '/':
tdSql.checkData(0,0,None)
else:
tdSql.checkData(0,0,0)
for param in self.error_param:
tdSql.error(f'select timezone({param}) from {tbname}')
tdSql.query(f"select * from {tbname} where timezone()='{timezone}'")
self.tb_type_check(tb_type)
def timezone_check_ntb(self,timezone):
tdSql.prepare()
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
for value in self.values_list:
tdSql.execute(
f'insert into {self.ntbname} values({value})')
self.data_check(timezone,self.ntbname,'normal_table')
tdSql.execute('drop database db')
def timezone_check_stb(self,timezone):
tdSql.prepare()
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f'create table if not exists {self.stbname}_{i} using {self.stbname} tags({self.tag_values[i]})')
for j in self.values_list:
tdSql.execute(f'insert into {self.stbname}_{i} values({j})')
self.data_check(timezone,self.stbname,'stable')
for i in range(self.tbnum):
self.data_check(timezone,f'{self.stbname}_{i}','child_table')
def run(self): # sourcery skip: extract-duplicate-method
timezone = self.get_system_timezone()
self.timezone_check_ntb(timezone)
self.timezone_check_stb(timezone)
tdSql.query("select timezone() from ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, time_zone)
tdSql.query("select timezone() from db.ntb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, time_zone)
tdSql.query("select timezone() from stb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, time_zone)
tdSql.query("select timezone() from db.stb")
tdSql.checkRows(2)
tdSql.checkData(0, 0, time_zone)
tdSql.query("select timezone() from stb_1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, time_zone)
tdSql.query("select timezone() from db.stb_1 ")
tdSql.checkRows(2)
tdSql.checkData(0, 0, time_zone)
tdSql.error("select timezone(1) from stb")
tdSql.error("select timezone(1) from db.stb")
tdSql.error("select timezone(1) from ntb")
tdSql.error("select timezone(1) from db.ntb")
tdSql.error("select timezone(1) from stb_1")
tdSql.error("select timezone(1) from db.stb_1")
tdSql.error("select timezone(now()) from stb")
tdSql.error("select timezone(now()) from db.stb")
tdSql.query(f"select * from ntb where timezone()='{time_zone}'")
tdSql.checkRows(2)
tdSql.query("select timezone()+1 from ntb")
tdSql.checkRows(2)
tdSql.query("select timezone()+1 from db.ntb")
tdSql.checkRows(2)
tdSql.query("select timezone()+1 from stb")
tdSql.checkRows(2)
tdSql.query("select timezone()+1 from db.stb")
tdSql.checkRows(2)
tdSql.query("select timezone()+1 from stb_1")
tdSql.checkRows(2)
tdSql.query("select timezone()+1 from db.stb_1")
tdSql.checkRows(2)
tdSql.query("select timezone()+1.5 from ntb")
tdSql.checkRows(2)
tdSql.query("select timezone()+1.5 from db.ntb")
tdSql.checkRows(2)
tdSql.query("select timezone()-100 from ntb")
tdSql.checkRows(2)
tdSql.query("select timezone()*100 from ntb")
tdSql.checkRows(2)
tdSql.query("select timezone()/10 from ntb")
# tdSql.query("select timezone()/0 from ntb")
tdSql.query("select timezone()+null from ntb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()-null from ntb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()*null from ntb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()/null from ntb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
# tdSql.query("select timezone()")
tdSql.query("select timezone()+null from stb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()-null from stb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()*null from stb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()/null from stb")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()+null from stb_1")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()-null from stb_1")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()*null from stb_1")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.query("select timezone()/null from stb_1")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
......
......@@ -227,6 +227,7 @@ class TDTestCase:
# first add three mnodes;
tdSql.execute("create mnode on dnode 2")
time.sleep(10)
tdSql.execute("create mnode on dnode 3")
# first check status ready
......
......@@ -11,7 +11,9 @@
# -*- coding: utf-8 -*-
from asyncore import loop
from collections import defaultdict
import subprocess
import random
import string
import threading
......@@ -75,7 +77,7 @@ class TMQCom:
return resultList
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0,alias=0):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
if valgrind == 1:
......@@ -88,30 +90,53 @@ class TMQCom:
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> nul 2>&1 &"
else:
shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
processorName = buildPath + '/build/bin/tmq_sim'
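# when alias is set, launch the consumer from a copied binary (tmq_sim_new) so it can
# later be stopped selectively by process name without touching the primary tmq_sim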
if alias != 0:
processorNameNew = buildPath + '/build/bin/tmq_sim_new'
shellCmd = 'cp %s %s'%(processorName, processorNameNew)
os.system(shellCmd)
processorName = processorNameNew
shellCmd = 'nohup ' + processorName + ' -c ' + cfgPath
shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
shellCmd += "> /dev/null 2>&1 &"
tdLog.info(shellCmd)
os.system(shellCmd)
def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb'):
while 1:
os.system(shellCmd)
def stopTmqSimProcess(self, processorName):
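# keep sending SIGINT until the process name no longer shows up in ps, giving the
# consumer a chance to exit gracefully rather than being killed outright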
psCmd = "ps -ef|grep -w %s|grep -v grep | awk '{print $2}'"%(processorName)
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(0.2)
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
tdLog.debug("%s is stopped by kill -INT" % (processorName))
def getStartConsumeNotifyFromTmqsim(self,cdbName='cdb',rows=1):
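# poll the notifyinfo table in the consume DB until at least `rows` records exist and
# one of them reports state 0, i.e. a consumer has started consuming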
loopFlag = 1
while loopFlag:
tdSql.query("select * from %s.notifyinfo"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if (tdSql.getRows() == 1) and (tdSql.getData(0, 1) == 0):
break
else:
time.sleep(0.1)
actRows = tdSql.getRows()
if (actRows >= rows):
for i in range(actRows):
if tdSql.getData(i, 1) == 0:
loopFlag = 0
break
time.sleep(0.1)
return
def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'):
while 1:
def getStartCommitNotifyFromTmqsim(self,cdbName='cdb',rows=2):
loopFlag = 1
while loopFlag:
tdSql.query("select * from %s.notifyinfo"%cdbName)
#tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
if tdSql.getRows() == 2 :
print(tdSql.getData(0, 1), tdSql.getData(1, 1))
if tdSql.getData(1, 1) == 1:
break
actRows = tdSql.getRows()
if (actRows >= rows):
for i in range(actRows):
if tdSql.getData(i, 1) == 1:
loopFlag = 0
break
time.sleep(0.1)
return
......
import taos
import sys
import time
import socket
import os
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
def checkFileContent(self, consumerId, queryString):
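# dump the query result through the taos shell into a file and compare it line by line
# (skipping the schema header) with the rows the consumer wrote to its own result file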
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip first line for it is schema
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 10,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 20,
'showMsg': 1,
'showRow': 1}
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
# queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# create one stb2
paraDict["stbName"] = 'stb2'
paraDict["ctbPrefix"] = 'ctbx'
paraDict["rowsPerTbl"] = 5000
tdLog.info("create stb2")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb2")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
# queryString = "select ts, sin(c1), abs(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
queryString = "select ts, sin(c1), abs(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
# tdSql.query(queryString)
# expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = "%s,%s"%(topicNameList[0],topicNameList[1])
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:3000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor 1")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("start consume processor 2")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],'cdb',0,1)
tdLog.info("async insert data")
pThread = tmqCom.asyncInsertData(paraDict)
tdLog.info("wait consumer commit notify")
tmqCom.getStartCommitNotifyFromTmqsim(rows=4)
tdLog.info("pkill one consume processor")
tmqCom.stopTmqSimProcess('tmq_sim_new')
pThread.join()
tdLog.info("wait the consume result")
expectRows = 2
resultList = tmqCom.selectConsumeResult(expectRows)
actTotalRows = 0
for i in range(len(resultList)):
actTotalRows += resultList[i]
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
expectTotalRows = 0
for i in range(len(expectRowsList)):
expectTotalRows += expectRowsList[i]
tdLog.info("act consume rows: %d, expect consume rows: %d"%(actTotalRows, expectTotalRows))
if expectTotalRows <= resultList[0]:
tdLog.info("act consume rows: %d should >= expect consume rows: %d"%(actTotalRows, expectTotalRows))
tdLog.exit("0 tmq consume rows error!")
# time.sleep(10)
# for i in range(len(topicNameList)):
# tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
tdSql.prepare()
self.tmqCase1()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
......@@ -109,6 +109,9 @@ python3 ./test.py -f 2-query/distribute_agg_apercentile.py
python3 ./test.py -f 2-query/distribute_agg_avg.py
python3 ./test.py -f 2-query/distribute_agg_stddev.py
python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/irate.py
python3 ./test.py -f 2-query/function_null.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py
......@@ -135,3 +138,4 @@ python3 ./test.py -f 7-tmq/stbFilter.py
python3 ./test.py -f 7-tmq/tmqCheckData.py
python3 ./test.py -f 7-tmq/tmqUdf.py
#python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 5
python3 ./test.py -f 7-tmq/tmqConsumerGroup.py
@REM python3 .\test.py -f 0-others\taosShell.py
@REM python3 .\test.py -f 0-others\taosShellError.py
python3 .\test.py -f 0-others\taosShell.py
python3 .\test.py -f 0-others\taosShellError.py
python3 .\test.py -f 0-others\taosShellNetChk.py
python3 .\test.py -f 0-others\telemetry.py
python3 .\test.py -f 0-others\taosdMonitor.py
python3 .\test.py -f 0-others\udfTest.py
@REM python3 .\test.py -f 0-others\udf_create.py
@REM python3 .\test.py -f 0-others\udf_restart_taosd.py
python3 .\test.py -f 0-others\udf_create.py
python3 .\test.py -f 0-others\udf_restart_taosd.py
@REM python3 .\test.py -f 0-others\cachelast.py
@REM python3 .\test.py -f 0-others\user_control.py
......
......@@ -38,7 +38,9 @@ def checkRunTimeError():
while 1:
time.sleep(1)
timeCount = timeCount + 1
print("checkRunTimeError",timeCount)
if (timeCount>900):
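# after roughly 15 minutes (900 one-second ticks) assume the Windows run is hung and
# force-kill the server, shell and consumer processes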
print("stop the test.")
os.system("TASKKILL /F /IM taosd.exe")
os.system("TASKKILL /F /IM taos.exe")
os.system("TASKKILL /F /IM tmq_sim.exe")
......
This diff has been collapsed.