diff --git a/cmake/cmake.define b/cmake/cmake.define
index 094eb4a2dab07a484504d4a4fe8175b4a8eb269e..e5ef08acb872b56ad041642e81d83c108a7674c6 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -15,12 +15,12 @@ MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH
 MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})
 
 find_package(Git QUIET)
-if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
+if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git")
 # Update submodules as needed
     option(GIT_SUBMODULE "Check submodules during build" ON)
     if(GIT_SUBMODULE)
         message(STATUS "Submodule update")
         execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive
-                        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+                        WORKING_DIRECTORY ${TD_SOURCE_DIR}
                         RESULT_VARIABLE GIT_SUBMOD_RESULT)
         if(NOT GIT_SUBMOD_RESULT EQUAL "0")
@@ -29,7 +29,7 @@ if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
     endif()
 endif()
 
-if(NOT EXISTS "${PROJECT_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
+if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
     message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need build them.")
 endif()
 
diff --git a/include/common/systable.h b/include/common/systable.h
index bd8aae998f9ab6a692b7c5793fbac0c58ae84eb7..506bdcfa8a78e00981d22da51f4116d5c12de64f 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -23,7 +23,6 @@ extern "C" {
 #define TDENGINE_SYSTABLE_H
 
 #define TSDB_INFORMATION_SCHEMA_DB "information_schema"
-#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
 #define TSDB_INS_TABLE_DNODES "dnodes"
 #define TSDB_INS_TABLE_MNODES "mnodes"
 #define TSDB_INS_TABLE_MODULES "modules"
@@ -44,27 +43,27 @@ extern "C" {
 #define TSDB_INS_TABLE_VNODES "vnodes"
 #define TSDB_INS_TABLE_CONFIGS "configs"
 
-#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
-#define TSDB_PERFS_TABLE_SMAS "smas"
-#define TSDB_PERFS_TABLE_SUBSCRIBES "subscribes"
-#define TSDB_PERFS_TABLE_CONNECTIONS "connections"
-#define TSDB_PERFS_TABLE_QUERIES "queries"
-#define TSDB_PERFS_TABLE_TOPICS "topics"
-#define TSDB_PERFS_TABLE_CONSUMERS "consumers"
-#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions"
-#define TSDB_PERFS_TABLE_OFFSETS "offsets"
-#define TSDB_PERFS_TABLE_TRANS "trans"
-#define TSDB_PERFS_TABLE_STREAMS "streams"
+#define TSDB_PERFORMANCE_SCHEMA_DB     "performance_schema"
+#define TSDB_PERFS_TABLE_SMAS          "smas"
+#define TSDB_PERFS_TABLE_SUBSCRIBES    "subscribes"
+#define TSDB_PERFS_TABLE_CONNECTIONS   "connections"
+#define TSDB_PERFS_TABLE_QUERIES       "queries"
+#define TSDB_PERFS_TABLE_TOPICS        "topics"
+#define TSDB_PERFS_TABLE_CONSUMERS     "consumers"
+#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "subscriptions"
+#define TSDB_PERFS_TABLE_OFFSETS       "offsets"
+#define TSDB_PERFS_TABLE_TRANS         "trans"
+#define TSDB_PERFS_TABLE_STREAMS       "streams"
 
 typedef struct SSysDbTableSchema {
-  const char *name;
+  const char* name;
   const int32_t type;
   const int32_t bytes;
 } SSysDbTableSchema;
 
 typedef struct SSysTableMeta {
-  const char *name;
-  const SSysDbTableSchema *schema;
+  const char* name;
+  const SSysDbTableSchema* schema;
   const int32_t colNum;
 } SSysTableMeta;
 
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index f253d3196396b1a880ee5a6f73d08f739c3ac710..da5158abb562c4236282f689b61cf38be2337e85 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -17,8 +17,8 @@
#define _TD_COMMON_GLOBAL_H_ #include "tarray.h" -#include "tdef.h" #include "tconfig.h" +#include "tdef.h" #ifdef __cplusplus extern "C" { @@ -121,15 +121,16 @@ extern char tsCompressor[]; extern int32_t tsDiskCfgNum; extern SDiskCfg tsDiskCfg[]; -// internal -extern int32_t tsTransPullupMs; -extern int32_t tsMaRebalanceMs; +// internal +extern int32_t tsTransPullupInterval; +extern int32_t tsMqRebalanceInterval; #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize) -int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, const char *envFile, - char *apolloUrl, SArray *pArgs, bool tsc); -int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc); +int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd, + const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc); +int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile, char *apolloUrl, SArray *pArgs, + bool tsc); void taosCleanupCfg(); void taosCfgDynamicOptions(const char *option, const char *value); void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary); diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 36bef5e85a905bae1f4c84f71e352257791a600e..8e918c40f921138bfa2fc2e99c226f7be384c537 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -204,7 +204,6 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp) - // sync integration TD_DEF_MSG_TYPE(TDMT_VND_SYNC_TIMEOUT, "vnode-sync-timeout", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING, "vnode-sync-ping", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SYNC_PING_REPLY, "vnode-sync-ping-reply", NULL, NULL) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 4e843aeb596506fea0537f48d740aab57b0d78b3..6a57b8d53ea30a50613a3bc2885299d548c4da30 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -411,7 +411,6 @@ int32_t* taosGetErrno(); #define TSDB_CODE_SYN_INVALID_MSGLEN TAOS_DEF_ERROR_CODE(0, 0x0909) #define TSDB_CODE_SYN_INVALID_MSGTYPE TAOS_DEF_ERROR_CODE(0, 0x090A) -// sync integration #define TSDB_CODE_SYN_NOT_LEADER TAOS_DEF_ERROR_CODE(0, 0x0910) #define TSDB_CODE_SYN_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x09FF) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 51f924280ae2447aa2de50ecda78bd661857e67d..fba14bbaf582317a6665c8856ab537dd0b1c133e 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -211,6 +211,7 @@ static const SSysDbTableSchema transSchema[] = { {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "type", .bytes = TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, + {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "last_error", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, }; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 87a9c521afed5c803f454c66388e579c4596c113..c8781097114f12654c2fbef32908d86bc51c81f6 100644 --- a/source/common/src/tglobal.c +++ 
b/source/common/src/tglobal.c @@ -170,8 +170,8 @@ uint32_t tsCurRange = 100; // range char tsCompressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRESSOR // internal -int32_t tsTransPullupMs = 6000; -int32_t tsMaRebalanceMs = 2000; +int32_t tsTransPullupInterval = 6; +int32_t tsMqRebalanceInterval = 2; void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) { tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN); @@ -438,6 +438,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "telemetryServer", tsTelemServer, 0) != 0) return -1; if (cfgAddInt32(pCfg, "telemetryPort", tsTelemPort, 1, 65056, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1; + if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1; + return 0; } @@ -575,6 +578,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; + tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; + tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; + if (tsQueryBufferSize >= 0) { tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL; } diff --git a/source/dnode/mgmt/implement/src/dmEps.c b/source/dnode/mgmt/implement/src/dmEps.c index 853c23831661dbd83f53e637d8458d5ca0b62ea5..f5c9a1d91b2fc7f2169386980ec627ac2f5473fc 100644 --- a/source/dnode/mgmt/implement/src/dmEps.c +++ b/source/dnode/mgmt/implement/src/dmEps.c @@ -51,7 +51,7 @@ int32_t dmReadEps(SDnode *pDnode) { pDnode->data.dnodeEps = taosArrayInit(1, sizeof(SDnodeEp)); if (pDnode->data.dnodeEps == NULL) { dError("failed to calloc dnodeEp array since %s", strerror(errno)); - goto PRASE_DNODE_OVER; + goto _OVER; } snprintf(file, sizeof(file), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP); @@ -59,53 +59,53 @@ int32_t dmReadEps(SDnode *pDnode) { if (pFile == NULL) { // dDebug("file %s not exist", file); code = 0; - goto PRASE_DNODE_OVER; + goto _OVER; } len = (int32_t)taosReadFile(pFile, content, maxLen); if (len <= 0) { dError("failed to read %s since content is null", file); - goto PRASE_DNODE_OVER; + goto _OVER; } content[len] = 0; root = cJSON_Parse(content); if (root == NULL) { dError("failed to read %s since invalid json format", file); - goto PRASE_DNODE_OVER; + goto _OVER; } cJSON *dnodeId = cJSON_GetObjectItem(root, "dnodeId"); if (!dnodeId || dnodeId->type != cJSON_Number) { dError("failed to read %s since dnodeId not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } pDnode->data.dnodeId = dnodeId->valueint; cJSON *clusterId = cJSON_GetObjectItem(root, "clusterId"); if (!clusterId || clusterId->type != cJSON_String) { dError("failed to read %s since clusterId not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } pDnode->data.clusterId = atoll(clusterId->valuestring); cJSON *dropped = cJSON_GetObjectItem(root, "dropped"); if (!dropped || dropped->type != cJSON_Number) { dError("failed to read %s since dropped not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } pDnode->data.dropped = dropped->valueint; cJSON *dnodes = cJSON_GetObjectItem(root, "dnodes"); if (!dnodes || dnodes->type != cJSON_Array) { dError("failed to read %s since dnodes not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } int32_t numOfDnodes = cJSON_GetArraySize(dnodes); if (numOfDnodes <= 0) { dError("failed to read %s since 
numOfDnodes:%d invalid", file, numOfDnodes); - goto PRASE_DNODE_OVER; + goto _OVER; } for (int32_t i = 0; i < numOfDnodes; ++i) { @@ -117,7 +117,7 @@ int32_t dmReadEps(SDnode *pDnode) { cJSON *did = cJSON_GetObjectItem(node, "id"); if (!did || did->type != cJSON_Number) { dError("failed to read %s since dnodeId not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } dnodeEp.id = did->valueint; @@ -125,14 +125,14 @@ int32_t dmReadEps(SDnode *pDnode) { cJSON *dnodeFqdn = cJSON_GetObjectItem(node, "fqdn"); if (!dnodeFqdn || dnodeFqdn->type != cJSON_String || dnodeFqdn->valuestring == NULL) { dError("failed to read %s since dnodeFqdn not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } tstrncpy(dnodeEp.ep.fqdn, dnodeFqdn->valuestring, TSDB_FQDN_LEN); cJSON *dnodePort = cJSON_GetObjectItem(node, "port"); if (!dnodePort || dnodePort->type != cJSON_Number) { dError("failed to read %s since dnodePort not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } dnodeEp.ep.port = dnodePort->valueint; @@ -140,7 +140,7 @@ int32_t dmReadEps(SDnode *pDnode) { cJSON *isMnode = cJSON_GetObjectItem(node, "isMnode"); if (!isMnode || isMnode->type != cJSON_Number) { dError("failed to read %s since isMnode not found", file); - goto PRASE_DNODE_OVER; + goto _OVER; } dnodeEp.isMnode = isMnode->valueint; @@ -151,7 +151,7 @@ int32_t dmReadEps(SDnode *pDnode) { dDebug("succcessed to read file %s", file); dmPrintEps(pDnode); -PRASE_DNODE_OVER: +_OVER: if (content != NULL) taosMemoryFree(content); if (root != NULL) cJSON_Delete(root); if (pFile != NULL) taosCloseFile(&pFile); @@ -176,7 +176,7 @@ PRASE_DNODE_OVER: int32_t dmWriteEps(SDnode *pDnode) { char file[PATH_MAX] = {0}; - char realfile[PATH_MAX]; + char realfile[PATH_MAX] = {0}; snprintf(file, sizeof(file), "%s%sdnode.json.bak", pDnode->wrappers[DNODE].path, TD_DIRSEP); snprintf(realfile, sizeof(realfile), "%s%sdnode.json", pDnode->wrappers[DNODE].path, TD_DIRSEP); diff --git a/source/dnode/mgmt/implement/src/dmWorker.c b/source/dnode/mgmt/implement/src/dmWorker.c index b19c2ab36b906f4caa52137de5434d7498e1cad5..72b21115918641a7962619118ff7487f22cad126 100644 --- a/source/dnode/mgmt/implement/src/dmWorker.c +++ b/source/dnode/mgmt/implement/src/dmWorker.c @@ -105,12 +105,13 @@ void dmStopMonitorThread(SDnode *pDnode) { } static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { - SDnode * pDnode = pInfo->ahandle; - SRpcMsg *pRpc = &pMsg->rpcMsg; - int32_t code = -1; + SDnode *pDnode = pInfo->ahandle; + + int32_t code = -1; + tmsg_t msgType = pMsg->rpcMsg.msgType; dTrace("msg:%p, will be processed in dnode-mgmt queue", pMsg); - switch (pRpc->msgType) { + switch (msgType) { case TDMT_DND_CONFIG_DNODE: code = dmProcessConfigReq(pDnode, pMsg); break; @@ -148,9 +149,14 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { break; } - if (pRpc->msgType & 1u) { - if (code != 0) code = terrno; - SRpcMsg rsp = {.handle = pRpc->handle, .ahandle = pRpc->ahandle, .code = code, .refId = pRpc->refId}; + if (msgType & 1u) { + if (code != 0 && terrno != 0) code = terrno; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .code = code, + .refId = pMsg->rpcMsg.refId, + }; rpcSendResponse(&rsp); } @@ -160,7 +166,13 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { } int32_t dmStartWorker(SDnode *pDnode) { - SSingleWorkerCfg cfg = {.min = 1, .max = 1, .name = "dnode-mgmt", .fp = (FItem)dmProcessMgmtQueue, .param = pDnode}; + SSingleWorkerCfg cfg = { + .min = 1, + .max = 1, + .name = 
"dnode-mgmt", + .fp = (FItem)dmProcessMgmtQueue, + .param = pDnode, + }; if (tSingleWorkerInit(&pDnode->data.mgmtWorker, &cfg) != 0) { dError("failed to start dnode-mgmt worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c b/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c index 230fa236746c0afd9e83f0037c4637a351eec6dd..d3204039e6769e62d28586da96aa9b1e3adf095a 100644 --- a/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c +++ b/source/dnode/mgmt/mgmt_bnode/src/bmWorker.c @@ -18,7 +18,11 @@ static void bmSendErrorRsp(SNodeMsg *pMsg, int32_t code) { SRpcMsg rpcRsp = { - .handle = pMsg->rpcMsg.handle, .ahandle = pMsg->rpcMsg.ahandle, .code = code, .refId = pMsg->rpcMsg.refId}; + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .code = code, + .refId = pMsg->rpcMsg.refId, + }; tmsgSendRsp(&rpcRsp); dTrace("msg:%p, is freed", pMsg); @@ -103,7 +107,7 @@ static void bmProcessWriteQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numO } int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SBnodeMgmt * pMgmt = pWrapper->pMgmt; + SBnodeMgmt *pMgmt = pWrapper->pMgmt; SMultiWorker *pWorker = &pMgmt->writeWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); @@ -112,7 +116,7 @@ int32_t bmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SBnodeMgmt * pMgmt = pWrapper->pMgmt; + SBnodeMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->monitorWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); @@ -121,7 +125,12 @@ int32_t bmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t bmStartWorker(SBnodeMgmt *pMgmt) { - SMultiWorkerCfg cfg = {.max = 1, .name = "bnode-write", .fp = (FItems)bmProcessWriteQueue, .param = pMgmt}; + SMultiWorkerCfg cfg = { + .max = 1, + .name = "bnode-write", + .fp = (FItems)bmProcessWriteQueue, + .param = pMgmt, + }; if (tMultiWorkerInit(&pMgmt->writeWorker, &cfg) != 0) { dError("failed to start bnode-write worker since %s", terrstr()); return -1; @@ -129,7 +138,12 @@ int32_t bmStartWorker(SBnodeMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name = "bnode-monitor", .fp = (FItem)bmProcessMonitorQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "bnode-monitor", + .fp = (FItem)bmProcessMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start bnode-monitor worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c index 75c48e79eb7966fb52fbbf45bb874df93a36d17f..83c832a41eca6f220e1de218cad4bd88e8c3554d 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmFile.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmFile.c @@ -22,7 +22,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { int32_t maxLen = 4096; char *content = taosMemoryCalloc(1, maxLen + 1); cJSON *root = NULL; - char file[PATH_MAX]; + char file[PATH_MAX] = {0}; TdFilePtr pFile = NULL; snprintf(file, sizeof(file), "%s%smnode.json", pMgmt->path, TD_DIRSEP); @@ -30,39 +30,39 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { if (pFile == NULL) { // dDebug("file %s not exist", file); code = 0; - goto PRASE_MNODE_OVER; + goto _OVER; } len = (int32_t)taosReadFile(pFile, content, maxLen); if (len <= 0) { dError("failed to read %s since content is null", file); - goto PRASE_MNODE_OVER; + goto _OVER; } content[len] = 0; root = 
cJSON_Parse(content); if (root == NULL) { dError("failed to read %s since invalid json format", file); - goto PRASE_MNODE_OVER; + goto _OVER; } cJSON *deployed = cJSON_GetObjectItem(root, "deployed"); if (!deployed || deployed->type != cJSON_Number) { dError("failed to read %s since deployed not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } *pDeployed = deployed->valueint; cJSON *mnodes = cJSON_GetObjectItem(root, "mnodes"); if (!mnodes || mnodes->type != cJSON_Array) { dError("failed to read %s since nodes not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } pMgmt->replica = cJSON_GetArraySize(mnodes); if (pMgmt->replica <= 0 || pMgmt->replica > TSDB_MAX_REPLICA) { dError("failed to read %s since mnodes size %d invalid", file, pMgmt->replica); - goto PRASE_MNODE_OVER; + goto _OVER; } for (int32_t i = 0; i < pMgmt->replica; ++i) { @@ -74,21 +74,21 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { cJSON *id = cJSON_GetObjectItem(node, "id"); if (!id || id->type != cJSON_Number) { dError("failed to read %s since id not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } pReplica->id = id->valueint; cJSON *fqdn = cJSON_GetObjectItem(node, "fqdn"); if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) { dError("failed to read %s since fqdn not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } tstrncpy(pReplica->fqdn, fqdn->valuestring, TSDB_FQDN_LEN); cJSON *port = cJSON_GetObjectItem(node, "port"); if (!port || port->type != cJSON_Number) { dError("failed to read %s since port not found", file); - goto PRASE_MNODE_OVER; + goto _OVER; } pReplica->port = port->valueint; } @@ -96,7 +96,7 @@ int32_t mmReadFile(SMnodeMgmt *pMgmt, bool *pDeployed) { code = 0; dDebug("succcessed to read file %s, deployed:%d", file, *pDeployed); -PRASE_MNODE_OVER: +_OVER: if (content != NULL) taosMemoryFree(content); if (root != NULL) cJSON_Delete(root); if (pFile != NULL) taosCloseFile(&pFile); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c index db69b62e5822b424d542e00ef39509c20b3f9fc5..0bf846b7fc772952965a43ca2fd979a85bd661a3 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmInt.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmInt.c @@ -161,9 +161,7 @@ static int32_t mmOpen(SMgmtWrapper *pWrapper) { SMnodeOpt option = {0}; if (!deployed) { dInfo("mnode start to deploy"); - // if (pWrapper->procType == DND_PROC_CHILD) { - pWrapper->pDnode->data.dnodeId = 1; - // } + pWrapper->pDnode->data.dnodeId = 1; mmBuildOptionForDeploy(pMgmt, &option); } else { dInfo("mnode start to open"); diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c index 1f27b314e20ec49b902c35babf052e6eeb0eb463..aac5bbc16afdc074d785a15f836f6c9d637bc1ce 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmWorker.c @@ -17,42 +17,48 @@ #include "mmInt.h" static inline void mmSendRsp(SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } static void mmProcessQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pInfo->ahandle; + int32_t code = -1; + tmsg_t msgType = pMsg->rpcMsg.msgType; 
dTrace("msg:%p, get from mnode queue", pMsg); - SRpcMsg *pRpc = &pMsg->rpcMsg; - int32_t code = -1; - if (pMsg->rpcMsg.msgType == TDMT_DND_ALTER_MNODE) { - code = mmProcessAlterReq(pMgmt, pMsg); - } else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_INFO) { - code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg); - } else if (pMsg->rpcMsg.msgType == TDMT_MON_MM_LOAD) { - code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg); - } else { - pMsg->pNode = pMgmt->pMnode; - code = mndProcessMsg(pMsg); + switch (msgType) { + case TDMT_DND_ALTER_MNODE: + code = mmProcessAlterReq(pMgmt, pMsg); + break; + case TDMT_MON_MM_INFO: + code = mmProcessGetMonMmInfoReq(pMgmt->pWrapper, pMsg); + break; + case TDMT_MON_MM_LOAD: + code = mmProcessGetMnodeLoadsReq(pMgmt->pWrapper, pMsg); + break; + default: + pMsg->pNode = pMgmt->pMnode; + code = mndProcessMsg(pMsg); } - if (pRpc->msgType & 1U) { - if (pRpc->handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { + if (msgType & 1U) { + if (pMsg->rpcMsg.handle != NULL && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { if (code != 0 && terrno != 0) code = terrno; mmSendRsp(pMsg, code); } } dTrace("msg:%p, is freed, result:0x%04x:%s", pMsg, code & 0XFFFF, tstrerror(code)); - rpcFreeCont(pRpc->pCont); + rpcFreeCont(pMsg->rpcMsg.pCont); taosFreeQitem(pMsg); } @@ -78,38 +84,38 @@ static void mmProcessQueryQueue(SQueueInfo *pInfo, SNodeMsg *pMsg) { taosFreeQitem(pMsg); } -static void mmPutMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) { +static void mmPutNodeMsgToWorker(SSingleWorker *pWorker, SNodeMsg *pMsg) { dTrace("msg:%p, put into worker %s", pMsg, pWorker->name); taosWriteQitem(pWorker->queue, pMsg); } int32_t mmProcessWriteMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->writeWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->writeWorker, pMsg); return 0; } int32_t mmProcessSyncMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->syncWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->syncWorker, pMsg); return 0; } int32_t mmProcessReadMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->readWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->readWorker, pMsg); return 0; } int32_t mmProcessQueryMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->queryWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->queryWorker, pMsg); return 0; } int32_t mmProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { SMnodeMgmt *pMgmt = pWrapper->pMgmt; - mmPutMsgToWorker(&pMgmt->monitorWorker, pMsg); + mmPutNodeMsgToWorker(&pMgmt->monitorWorker, pMsg); return 0; } @@ -144,40 +150,62 @@ int32_t mmPutMsgToSyncQueue(SMgmtWrapper *pWrapper, SRpcMsg *pRpc) { } int32_t mmStartWorker(SMnodeMgmt *pMgmt) { - SSingleWorkerCfg qCfg = {.min = tsNumOfMnodeQueryThreads, - .max = tsNumOfMnodeQueryThreads, - .name = "mnode-query", - .fp = (FItem)mmProcessQueryQueue, - .param = pMgmt}; + SSingleWorkerCfg qCfg = { + .min = tsNumOfMnodeQueryThreads, + .max = tsNumOfMnodeQueryThreads, + .name = "mnode-query", + .fp = (FItem)mmProcessQueryQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->queryWorker, &qCfg) != 0) { dError("failed to start mnode-query worker since %s", terrstr()); return -1; } - SSingleWorkerCfg rCfg = {.min = tsNumOfMnodeReadThreads, - .max = tsNumOfMnodeReadThreads, - .name = "mnode-read", - .fp = (FItem)mmProcessQueue, - .param = pMgmt}; + SSingleWorkerCfg 
rCfg = { + .min = tsNumOfMnodeReadThreads, + .max = tsNumOfMnodeReadThreads, + .name = "mnode-read", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->readWorker, &rCfg) != 0) { dError("failed to start mnode-read worker since %s", terrstr()); return -1; } - SSingleWorkerCfg wCfg = {.min = 1, .max = 1, .name = "mnode-write", .fp = (FItem)mmProcessQueue, .param = pMgmt}; + SSingleWorkerCfg wCfg = { + .min = 1, + .max = 1, + .name = "mnode-write", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->writeWorker, &wCfg) != 0) { dError("failed to start mnode-write worker since %s", terrstr()); return -1; } - SSingleWorkerCfg sCfg = {.min = 1, .max = 1, .name = "mnode-sync", .fp = (FItem)mmProcessQueue, .param = pMgmt}; + SSingleWorkerCfg sCfg = { + .min = 1, + .max = 1, + .name = "mnode-sync", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->syncWorker, &sCfg) != 0) { dError("failed to start mnode mnode-sync worker since %s", terrstr()); return -1; } if (tsMultiProcess) { - SSingleWorkerCfg mCfg = {.min = 1, .max = 1, .name = "mnode-monitor", .fp = (FItem)mmProcessQueue, .param = pMgmt}; + SSingleWorkerCfg mCfg = { + .min = 1, + .max = 1, + .name = "mnode-monitor", + .fp = (FItem)mmProcessQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start mnode mnode-monitor worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c index da85ee64a81b0e69255b670b56ab74ac42d69abc..965d35cb3e4546bdf7e4b48d4c4d9b13aef9c6ca 100644 --- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c +++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c @@ -17,12 +17,14 @@ #include "qmInt.h" static inline void qmSendRsp(SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } @@ -145,22 +147,26 @@ int32_t qmGetQueueSize(SMgmtWrapper *pWrapper, int32_t vgId, EQueueType qtype) { } int32_t qmStartWorker(SQnodeMgmt *pMgmt) { - SSingleWorkerCfg queryCfg = {.min = tsNumOfVnodeQueryThreads, - .max = tsNumOfVnodeQueryThreads, - .name = "qnode-query", - .fp = (FItem)qmProcessQueryQueue, - .param = pMgmt}; + SSingleWorkerCfg queryCfg = { + .min = tsNumOfVnodeQueryThreads, + .max = tsNumOfVnodeQueryThreads, + .name = "qnode-query", + .fp = (FItem)qmProcessQueryQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->queryWorker, &queryCfg) != 0) { dError("failed to start qnode-query worker since %s", terrstr()); return -1; } - SSingleWorkerCfg fetchCfg = {.min = tsNumOfQnodeFetchThreads, - .max = tsNumOfQnodeFetchThreads, - .name = "qnode-fetch", - .fp = (FItem)qmProcessFetchQueue, - .param = pMgmt}; + SSingleWorkerCfg fetchCfg = { + .min = tsNumOfQnodeFetchThreads, + .max = tsNumOfQnodeFetchThreads, + .name = "qnode-fetch", + .fp = (FItem)qmProcessFetchQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->fetchWorker, &fetchCfg) != 0) { dError("failed to start qnode-fetch worker since %s", terrstr()); @@ -169,7 +175,12 @@ int32_t qmStartWorker(SQnodeMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name 
= "qnode-monitor", .fp = (FItem)qmProcessMonitorQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "qnode-monitor", + .fp = (FItem)qmProcessMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start qnode-monitor worker since %s", terrstr()); return -1; diff --git a/source/dnode/mgmt/mgmt_snode/src/smWorker.c b/source/dnode/mgmt/mgmt_snode/src/smWorker.c index 25872aec55d4f2e00330a34e1a7887b24ca61e34..2ae439bbd6a081bd21036bb3667d5aa0645df896 100644 --- a/source/dnode/mgmt/mgmt_snode/src/smWorker.c +++ b/source/dnode/mgmt/mgmt_snode/src/smWorker.c @@ -17,12 +17,14 @@ #include "smInt.h" static inline void smSendRsp(SNodeMsg *pMsg, int32_t code) { - SRpcMsg rsp = {.handle = pMsg->rpcMsg.handle, - .ahandle = pMsg->rpcMsg.ahandle, - .refId = pMsg->rpcMsg.refId, - .code = code, - .pCont = pMsg->pRsp, - .contLen = pMsg->rspLen}; + SRpcMsg rsp = { + .handle = pMsg->rpcMsg.handle, + .ahandle = pMsg->rpcMsg.ahandle, + .refId = pMsg->rpcMsg.refId, + .code = code, + .pCont = pMsg->pRsp, + .contLen = pMsg->rspLen, + }; tmsgSendRsp(&rsp); } @@ -90,7 +92,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) { return -1; } - SMultiWorkerCfg cfg = {.max = 1, .name = "snode-unique", .fp = smProcessUniqueQueue, .param = pMgmt}; + SMultiWorkerCfg cfg = { + .max = 1, + .name = "snode-unique", + .fp = smProcessUniqueQueue, + .param = pMgmt, + }; if (tMultiWorkerInit(pUniqueWorker, &cfg) != 0) { dError("failed to start snode-unique worker since %s", terrstr()); return -1; @@ -101,11 +108,13 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) { } } - SSingleWorkerCfg cfg = {.min = tsNumOfSnodeSharedThreads, - .max = tsNumOfSnodeSharedThreads, - .name = "snode-shared", - .fp = (FItem)smProcessSharedQueue, - .param = pMgmt}; + SSingleWorkerCfg cfg = { + .min = tsNumOfSnodeSharedThreads, + .max = tsNumOfSnodeSharedThreads, + .name = "snode-shared", + .fp = (FItem)smProcessSharedQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->sharedWorker, &cfg)) { dError("failed to start snode shared-worker since %s", terrstr()); @@ -114,7 +123,12 @@ int32_t smStartWorker(SSnodeMgmt *pMgmt) { if (tsMultiProcess) { SSingleWorkerCfg mCfg = { - .min = 1, .max = 1, .name = "snode-monitor", .fp = (FItem)smProcessMonitorQueue, .param = pMgmt}; + .min = 1, + .max = 1, + .name = "snode-monitor", + .fp = (FItem)smProcessMonitorQueue, + .param = pMgmt, + }; if (tSingleWorkerInit(&pMgmt->monitorWorker, &mCfg) != 0) { dError("failed to start snode-monitor worker since %s", terrstr()); return -1; @@ -150,7 +164,7 @@ static FORCE_INLINE int32_t smGetSWTypeFromMsg(SRpcMsg *pMsg) { } int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; SMultiWorker *pWorker = taosArrayGetP(pMgmt->uniqueWorkers, 0); if (pWorker == NULL) { terrno = TSDB_CODE_INVALID_MSG; @@ -163,7 +177,7 @@ int32_t smProcessMgmtMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->monitorWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); @@ -172,7 +186,7 @@ int32_t smProcessMonitorMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; int32_t index = smGetSWIdFromMsg(&pMsg->rpcMsg); SMultiWorker 
*pWorker = taosArrayGetP(pMgmt->uniqueWorkers, index); if (pWorker == NULL) { @@ -186,7 +200,7 @@ int32_t smProcessUniqueMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { } int32_t smProcessSharedMsg(SMgmtWrapper *pWrapper, SNodeMsg *pMsg) { - SSnodeMgmt * pMgmt = pWrapper->pMgmt; + SSnodeMgmt *pMgmt = pWrapper->pMgmt; SSingleWorker *pWorker = &pMgmt->sharedWorker; dTrace("msg:%p, put into worker:%s", pMsg, pWorker->name); diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index a55cf412622fab0b18fbcde6d86b70cbfe11b92a..e522be06290a4340cdea5f1e1a9699f2cc303231 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -335,7 +335,6 @@ static int32_t mndProcessStatusReq(SNodeMsg *pReq) { } bool roleChanged = false; for (int32_t vg = 0; vg < pVgroup->replica; ++vg) { - // sync integration if (pVgroup->vnodeGid[vg].dnodeId == statusReq.dnodeId) { if (pVgroup->vnodeGid[vg].role != pVload->syncState) { roleChanged = true; diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index 3d4254da03bc6c630eb13857df169d59979a81bd..5085de86107ea16b8f4578e86730594e0046d23d 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -1405,15 +1405,18 @@ static int32_t mndRetrieveTrans(SNodeMsg *pReq, SShowObj *pShow, SSDataBlock *pB colDataAppend(pColInfo, numOfRows, (const char *)dbname, false); char type[TSDB_TRANS_TYPE_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes); + STR_WITH_MAXSIZE_TO_VARSTR(type, mndTransType(pTrans->type), pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)type, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); + colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false); + pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->lastExecTime, false); char lastError[TSDB_TRANS_ERROR_LEN + VARSTR_HEADER_SIZE] = {0}; - STR_WITH_MAXSIZE_TO_VARSTR(dbname, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes); + STR_WITH_MAXSIZE_TO_VARSTR(lastError, pTrans->lastError, pShow->pMeta->pSchemas[cols].bytes); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)lastError, false); diff --git a/source/dnode/mnode/impl/src/mnode.c b/source/dnode/mnode/impl/src/mnode.c index e2814e95f0fbd3bc0dba38c8973e3cd08af2858a..690399f09995b484e1649f14ec25534d05ffa119 100644 --- a/source/dnode/mnode/impl/src/mnode.c +++ b/source/dnode/mnode/impl/src/mnode.c @@ -65,7 +65,7 @@ static void mndPullupTrans(void *param, void *tmrId) { tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg); } - taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer); + taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer); } static void mndCalMqRebalance(void *param, void *tmrId) { @@ -81,7 +81,7 @@ static void mndCalMqRebalance(void *param, void *tmrId) { tmsgPutToQueue(&pMnode->msgCb, READ_QUEUE, &rpcMsg); } - taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer); + taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer); } static void mndPullupTelem(void *param, void *tmrId) { @@ -103,12 +103,12 @@ static 
int32_t mndInitTimer(SMnode *pMnode) { return -1; } - if (taosTmrReset(mndPullupTrans, tsTransPullupMs, pMnode, pMnode->timer, &pMnode->transTimer)) { + if (taosTmrReset(mndPullupTrans, tsTransPullupInterval * 1000, pMnode, pMnode->timer, &pMnode->transTimer)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } - if (taosTmrReset(mndCalMqRebalance, tsMaRebalanceMs, pMnode, pMnode->timer, &pMnode->mqTimer)) { + if (taosTmrReset(mndCalMqRebalance, tsMqRebalanceInterval * 1000, pMnode, pMnode->timer, &pMnode->mqTimer)) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; } diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp index b3cbcb6898324ce647a8d025e4ee0a3c65df18cc..974c86b4231b03b53821bd1aece9a40c18200eec 100644 --- a/source/dnode/mnode/impl/test/trans/trans2.cpp +++ b/source/dnode/mnode/impl/test/trans/trans2.cpp @@ -58,7 +58,7 @@ class MndTestTrans2 : public ::testing::Test { strcpy(opt.replicas[0].fqdn, "localhost"); opt.msgCb = msgCb; - tsTransPullupMs = 1000; + tsTransPullupInterval = 1; const char *mnodepath = "/tmp/mnode_test_trans"; taosRemoveDir(mnodepath); diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 8850b353970aef29b8509d040877c95530b27230..ebf49c644bbfa1adcc0753a2ba8c68b2ee3aff63 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -157,7 +157,7 @@ struct SVnodeCfg { bool isWeak; STsdbCfg tsdbCfg; SWalCfg walCfg; - SSyncCfg syncCfg; // sync integration + SSyncCfg syncCfg; uint32_t hashBegin; uint32_t hashEnd; int8_t hashMethod; diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index ff0ebefb42baa7b93a92f2f38e2788ef8397db06..9294718550cd218e11ed5249b4da3e28bf400882 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -879,14 +879,14 @@ static TSKEY extractFirstTraverseKey(STableCheckInfo* pCheckInfo, int32_t order, } } - -static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow, TDRowVerT maxVer) { +static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int32_t update, STSRow** extraRow, + TDRowVerT maxVer) { STSRow *rmem = NULL, *rimem = NULL; if (pCheckInfo->iter) { SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter); if (node != NULL) { rmem = (STSRow*)SL_GET_NODE_DATA(node); -#if 0 // TODO: skiplist refactor +#if 0 // TODO: skiplist refactor if (TD_ROW_VER(rmem) > maxVer) { rmem = NULL; } @@ -898,7 +898,7 @@ static STSRow* getSRowInTableMem(STableCheckInfo* pCheckInfo, int32_t order, int SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter); if (node != NULL) { rimem = (STSRow*)SL_GET_NODE_DATA(node); -#if 0 // TODO: skiplist refactor +#if 0 // TODO: skiplist refactor if (TD_ROW_VER(rimem) > maxVer) { rimem = NULL; } @@ -1677,7 +1677,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa colIdOfRow2 = tdKvRowColIdAt(row2, k); } - if (colIdOfRow1 < colIdOfRow2) { // the most probability + if (colIdOfRow1 < colIdOfRow2) { // the most probability if (colIdOfRow1 < pColInfo->info.colId) { ++j; continue; @@ -1720,7 +1720,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa ++(*curRow); } ++nResult; - } else if (update){ + } else if (update) { mergeOption = 2; } else { mergeOption = 0; @@ -1741,7 +1741,7 @@ static int32_t mergeTwoRowFromMem(STsdbReadHandle* pTsdbReadHandle, int32_t capa ++(*curRow); } ++nResult; - } else 
if(update) { + } else if (update) { mergeOption = 2; } else { mergeOption = 0; @@ -2018,9 +2018,9 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf rv2 = TD_ROW_SVER(row2); } - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); - // numOfRows += 1; + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; } @@ -2028,7 +2028,6 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf cur->win.ekey = key; cur->lastKey = key + step; cur->mixBlock = true; - moveToNextRowInMem(pCheckInfo); } else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it #if 0 @@ -2064,7 +2063,11 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf } #endif if (TD_SUPPORT_UPDATE(pCfg->update)) { - doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos); + if (lastKeyAppend != key) { + lastKeyAppend = key; + ++curRow; + } + numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, pos, pos); if (rv1 != TD_ROW_SVER(row1)) { // pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1)); @@ -2074,10 +2077,10 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf // pSchema2 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row2)); rv2 = TD_ROW_SVER(row2); } + numOfRows += + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, + pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, row1, row2, numOfCols, - pCheckInfo->tableId, pSchema1, pSchema2, pCfg->update, &lastKeyAppend); - // ++numOfRows; if (cur->win.skey == TSKEY_INITIAL_VAL) { cur->win.skey = key; } @@ -2117,15 +2120,20 @@ static void doMergeTwoLevelData(STsdbReadHandle* pTsdbReadHandle, STableCheckInf int32_t qstart = 0, qend = 0; getQualifiedRowsPos(pTsdbReadHandle, pos, end, numOfRows, &qstart, &qend); - lastKeyAppend = tsArray[qend]; - numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, numOfRows, qstart, qend); + if ((lastKeyAppend != TSKEY_INITIAL_VAL) && + (lastKeyAppend != (ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? tsArray[qstart] : tsArray[qend]))) { + ++curRow; + } + numOfRows = doCopyRowsFromFileBlock(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, curRow, qstart, qend); pos += (qend - qstart + 1) * step; - if(numOfRows > 0) { + if (numOfRows > 0) { curRow = numOfRows - 1; } + cur->win.ekey = ASCENDING_TRAVERSE(pTsdbReadHandle->order) ? 
tsArray[qend] : tsArray[qstart]; cur->lastKey = cur->win.ekey + step; + lastKeyAppend = cur->win.ekey; } } while (numOfRows < pTsdbReadHandle->outputCapacity); @@ -2429,7 +2437,7 @@ static int32_t getFirstFileDataBlock(STsdbReadHandle* pTsdbReadHandle, bool* exi int32_t numOfTables = (int32_t)taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo); STsdbKeepCfg* pCfg = REPO_KEEP_CFG(pTsdbReadHandle->pTsdb); - STimeWindow win = TSWINDOW_INITIALIZER; + STimeWindow win = TSWINDOW_INITIALIZER; while (true) { tsdbRLockFS(REPO_FS(pTsdbReadHandle->pTsdb)); @@ -2735,7 +2743,6 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int STSchema* pSchema = NULL; TSKEY lastRowKey = TSKEY_INITIAL_VAL; - do { STSRow* row = getSRowInTableMem(pCheckInfo, pTsdbReadHandle->order, pCfg->update, NULL, TD_VER_MAX); if (row == NULL) { @@ -2760,8 +2767,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int pSchema = metaGetTbTSchema(REPO_META(pTsdbReadHandle->pTsdb), pCheckInfo->tableId, 0); rv = TD_ROW_SVER(row); } - numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId, pSchema, - NULL, pCfg->update, &lastRowKey); + numOfRows += mergeTwoRowFromMem(pTsdbReadHandle, maxRowsToRead, &curRows, row, NULL, numOfCols, pCheckInfo->tableId, + pSchema, NULL, pCfg->update, &lastRowKey); if (numOfRows >= maxRowsToRead) { moveToNextRowInMem(pCheckInfo); @@ -2770,7 +2777,7 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } while (moveToNextRowInMem(pCheckInfo)); - taosMemoryFreeClear(pSchema); // free the STSChema + taosMemoryFreeClear(pSchema); // free the STSChema assert(numOfRows <= maxRowsToRead); @@ -2898,8 +2905,8 @@ static bool loadCachedLastRow(STsdbReadHandle* pTsdbReadHandle) { // if (ret != TSDB_CODE_SUCCESS) { // return false; // } - mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols, pCheckInfo->tableId, - NULL, NULL, true, &lastRowKey); + mergeTwoRowFromMem(pTsdbReadHandle, pTsdbReadHandle->outputCapacity, &curRow, pRow, NULL, numOfCols, + pCheckInfo->tableId, NULL, NULL, true, &lastRowKey); taosMemoryFreeClear(pRow); // update the last key value @@ -3468,7 +3475,7 @@ void tsdbRetrieveDataBlockInfo(tsdbReaderT* pTsdbReadHandle, SDataBlockInfo* pDa pDataBlockInfo->rows = cur->rows; pDataBlockInfo->window = cur->win; -// ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); + // ASSERT(pDataBlockInfo->numOfCols >= (int32_t)(QH_GET_NUM_OF_COLS(pHandle)); } /* diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c index 5e21abb40458691de8d72b9d0e17d03a49a88a0f..32866d74696648fbeff7c6ea87d2ef489eb06ff5 100644 --- a/source/dnode/vnode/src/vnd/vnodeCfg.c +++ b/source/dnode/vnode/src/vnd/vnodeCfg.c @@ -97,7 +97,6 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) { if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1; - // sync integration if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1; if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1; SJson *pNodeInfoArr = tjsonCreateArray(); @@ -157,7 +156,6 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) { if (tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1; if (tjsonGetNumberValue(pJson, "hashMethod", 
pCfg->hashMethod) < 0) return -1; - // sync integration if (tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1; if (tjsonGetNumberValue(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 7e80eacf8f8026f594998bda2a079c59d21f7145..3e869650bf8ec3934c8596598a365a1653912c98 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -124,8 +124,7 @@ _exit: int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) { pLoad->vgId = TD_VID(pVnode); - // pLoad->syncState = TAOS_SYNC_STATE_LEADER; - pLoad->syncState = syncGetMyRole(pVnode->sync); // sync integration + pLoad->syncState = syncGetMyRole(pVnode->sync); pLoad->numOfTables = metaGetTbNum(pVnode->pMeta); pLoad->numOfTimeSeries = 400; pLoad->totalStorage = 300; diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 8f7c11436d974733c33b4f061ab715c14f9a18b1..ee6bdc1a59b064854062c6f6699b628fb1b33a67 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -198,7 +198,6 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { tsdbInsertTSmaData(((SVnode *)pVnode)->pTsdb, smaId, (const char *)data); } -// sync integration int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) { if (syncEnvIsStart()) { SSyncNode *pSyncNode = syncNodeAcquire(pVnode->sync); diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index 26393394e9bafd876d258d68ddd0ce79dc3621ff..1260f9a3e7cc121819599b5d3e149ba035bebdc3 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -14,12 +14,6 @@ */ #include "vnd.h" -// #include "sync.h" -// #include "syncTools.h" -// #include "tmsgcb.h" -// #include "vnodeInt.h" - -// sync integration int32_t vnodeSyncOpen(SVnode *pVnode, char *path) { SSyncInfo syncInfo; diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 118c840d5ea165c1866fe51b017a1f934f466cf4..75b6aeaae9c6f0da1aec8d4636c91fde576902ca 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -127,7 +127,7 @@ enum { int64_t gUdfTaskSeqNum = 0; typedef struct SUdfdProxy { - char udfdPipeName[UDF_LISTEN_PIPE_NAME_LEN]; + char udfdPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2]; uv_barrier_t gUdfInitBarrier; uv_loop_t gUdfdLoop; @@ -224,9 +224,15 @@ int32_t getUdfdPipeName(char* pipeName, int32_t size) { size_t dnodeIdSize = sizeof(dnodeId); int32_t err = uv_os_getenv(UDF_DNODE_ID_ENV_NAME, dnodeId, &dnodeIdSize); if (err != 0) { + fnError("get dnode id from env. error: %s.", uv_err_name(err)); dnodeId[0] = '1'; } +#ifdef _WIN32 snprintf(pipeName, size, "%s%s", UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId); +#else + snprintf(pipeName, size, "%s/%s%s", tsDataDir, UDF_LISTEN_PIPE_NAME_PREFIX, dnodeId); +#endif + fnInfo("get dnode id from env. dnode id: %s. 
pipe path: %s", dnodeId, pipeName); return 0; } @@ -998,7 +1004,7 @@ int32_t udfcOpen() { return 0; } SUdfdProxy *proxy = &gUdfdProxy; - getUdfdPipeName(proxy->udfdPipeName, UDF_LISTEN_PIPE_NAME_LEN); + getUdfdPipeName(proxy->udfdPipeName, sizeof(proxy->udfdPipeName)); proxy->gUdfcState = UDFC_STATE_STARTNG; uv_barrier_init(&proxy->gUdfInitBarrier, 2); uv_thread_create(&proxy->gUdfLoopThread, constructUdfService, proxy); @@ -1253,7 +1259,9 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult return false; } UdfcFuncHandle handle; - if (setupUdf((char*)pCtx->udfName, &handle) != 0) { + int32_t udfCode = 0; + if ((udfCode = setupUdf((char*)pCtx->udfName, &handle)) != 0) { + fnError("udfAggInit error. step setupUdf. udf code: %d", udfCode); return false; } SClientUdfUvSession *session = (SClientUdfUvSession *)handle; @@ -1266,7 +1274,8 @@ bool udfAggInit(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResult udfRes->session = (SClientUdfUvSession *)handle; SUdfInterBuf buf = {0}; - if (callUdfAggInit(handle, &buf) != 0) { + if ((udfCode = callUdfAggInit(handle, &buf)) != 0) { + fnError("udfAggInit error. step callUdfAggInit. udf code: %d", udfCode); return false; } udfRes->interResNum = buf.numOfResult; @@ -1310,21 +1319,23 @@ int32_t udfAggProcess(struct SqlFunctionCtx *pCtx) { .numOfResult = udfRes->interResNum}; SUdfInterBuf newState = {0}; - callUdfAggProcess(session, inputBlock, &state, &newState); - - udfRes->interResNum = newState.numOfResult; - memcpy(udfRes->interResBuf, newState.buf, newState.bufLen); - + int32_t udfCode = callUdfAggProcess(session, inputBlock, &state, &newState); + if (udfCode != 0) { + fnError("udfAggProcess error. code: %d", udfCode); + newState.numOfResult = 0; + } else { + udfRes->interResNum = newState.numOfResult; + memcpy(udfRes->interResBuf, newState.buf, newState.bufLen); + } if (newState.numOfResult == 1 || state.numOfResult == 1) { GET_RES_INFO(pCtx)->numOfRes = 1; } blockDataDestroy(inputBlock); - taosArrayDestroy(tempBlock.pDataBlock); taosMemoryFree(newState.buf); - return 0; + return TSDB_CODE_SUCCESS; } int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { @@ -1338,15 +1349,22 @@ int32_t udfAggFinalize(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock) { SUdfInterBuf state = {.buf = udfRes->interResBuf, .bufLen = session->bufSize, .numOfResult = udfRes->interResNum}; - callUdfAggFinalize(session, &state, &resultBuf); - - udfRes->finalResBuf = resultBuf.buf; - udfRes->finalResNum = resultBuf.numOfResult; - - teardownUdf(session); + int32_t udfCallCode= 0; + udfCallCode= callUdfAggFinalize(session, &state, &resultBuf); + if (udfCallCode!= 0) { + fnError("udfAggFinalize error. callUdfAggFinalize step. udf code:%d", udfCallCode); + GET_RES_INFO(pCtx)->numOfRes = 0; + } else { + memcpy(udfRes->finalResBuf, resultBuf.buf, session->outputLen); + udfRes->finalResNum = resultBuf.numOfResult; + GET_RES_INFO(pCtx)->numOfRes = udfRes->finalResNum; + } - if (resultBuf.numOfResult == 1) { - GET_RES_INFO(pCtx)->numOfRes = 1; + int32_t code = teardownUdf(session); + if (code != 0) { + fnError("udfAggFinalize error. teardownUdf step. 
udf code: %d", code); } + return functionFinalizeWithResultBuf(pCtx, pBlock, udfRes->finalResBuf); + } \ No newline at end of file diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index f5e4a9c6e6cf059e3927eb86b9f3cf7482c0f9b3..7695598fb805152a851d421713f469e59d08f08a 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -30,7 +30,7 @@ typedef struct SUdfdContext { uv_loop_t *loop; uv_pipe_t ctrlPipe; uv_signal_t intrSignal; - char listenPipeName[UDF_LISTEN_PIPE_NAME_LEN]; + char listenPipeName[PATH_MAX + UDF_LISTEN_PIPE_NAME_LEN + 2]; uv_pipe_t listeningPipe; void *clientRpc; @@ -652,7 +652,7 @@ static int32_t udfdUvInit() { uv_pipe_open(&global.ctrlPipe, 0); uv_read_start((uv_stream_t *)&global.ctrlPipe, udfdCtrlAllocBufCb, udfdCtrlReadCb); - getUdfdPipeName(global.listenPipeName, UDF_LISTEN_PIPE_NAME_LEN); + getUdfdPipeName(global.listenPipeName, sizeof(global.listenPipeName)); removeListeningPipe(); @@ -696,6 +696,7 @@ static int32_t udfdRun() { fnInfo("udfd stopped. result: %s, code: %d", uv_err_name(code), code); int codeClose = uv_loop_close(global.loop); fnDebug("uv loop close. result: %s", uv_err_name(codeClose)); + removeListeningPipe(); udfdCloseClientRpc(); uv_mutex_destroy(&global.udfsMutex); taosHashCleanup(global.udfsHash); diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index f5fab814ffdcb8b585e0171e0880944eda5e2557..5231890821d242bc8da03d87965752f76d2ba872 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -347,10 +347,21 @@ int32_t sclExecFunction(SFunctionNode *node, SScalarCtx *ctx, SScalarParam *outp if (fmIsUserDefinedFunc(node->funcId)) { UdfcFuncHandle udfHandle = NULL; - SCL_ERR_JRET(setupUdf(node->functionName, &udfHandle)); + code = setupUdf(node->functionName, &udfHandle); + if (code != 0) { + sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", node->functionName, code); + goto _return; + } code = callUdfScalarFunc(udfHandle, params, paramNum, output); - teardownUdf(udfHandle); - SCL_ERR_JRET(code); + if (code != 0) { + sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code); + goto _return; + } + code = teardownUdf(udfHandle); + if (code != 0) { + sclError("fmExecFunction error. callUdfScalarFunc. 
function name: %s, udf code:%d", node->functionName, code); + goto _return; + } } else { SScalarFuncExecFuncs ffpSet = {0}; code = fmGetScalarFuncExecFuncs(node->funcId, &ffpSet); diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c index a29ab8b454e4388b2477e6b6c9b690c007a73123..72654d008443a7e42c41a7381d0ad936a41aee2f 100644 --- a/source/os/src/osDir.c +++ b/source/os/src/osDir.c @@ -26,7 +26,6 @@ typedef struct TdDirEntry { WIN32_FIND_DATA findFileData; } TdDirEntry; - typedef struct TdDir { TdDirEntry dirEntry; HANDLE hFind; @@ -59,7 +58,7 @@ void wordfree(wordexp_t *pwordexp) {} #include typedef struct dirent dirent; -typedef struct DIR TdDir; +typedef struct DIR TdDir; typedef struct dirent TdDirEntry; #endif @@ -78,14 +77,14 @@ void taosRemoveDir(const char *dirname) { taosRemoveDir(filename); } else { (void)taosRemoveFile(filename); - //printf("file:%s is removed\n", filename); + // printf("file:%s is removed\n", filename); } } taosCloseDir(&pDir); rmdir(dirname); - //printf("dir:%s is removed\n", dirname); + // printf("dir:%s is removed\n", dirname); return; } @@ -102,8 +101,8 @@ int32_t taosMkDir(const char *dirname) { int32_t taosMulMkDir(const char *dirname) { if (dirname == NULL) return -1; - char *temp = strdup(dirname); - char *pos = temp; + char * temp = strdup(dirname); + char * pos = temp; int32_t code = 0; if (strncmp(temp, "/", 1) == 0) { @@ -111,8 +110,8 @@ int32_t taosMulMkDir(const char *dirname) { } else if (strncmp(temp, "./", 2) == 0) { pos += 2; } - - for ( ; *pos != '\0'; pos++) { + + for (; *pos != '\0'; pos++) { if (*pos == '/') { *pos = '\0'; code = mkdir(temp, 0755); @@ -123,7 +122,7 @@ int32_t taosMulMkDir(const char *dirname) { *pos = '/'; } } - + if (*(pos - 1) != '/') { code = mkdir(temp, 0755); if (code < 0 && errno != EEXIST) { @@ -145,7 +144,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { TdDirPtr pDir = taosOpenDir(dirname); if (pDir == NULL) return; - int64_t sec = taosGetTimestampSec(); + int64_t sec = taosGetTimestampSec(); TdDirEntryPtr de = NULL; while ((de = taosReadDir(pDir)) != NULL) { @@ -173,9 +172,9 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { int32_t days = (int32_t)(TABS(sec - fileSec) / 86400 + 1); if (days > keepDays) { (void)taosRemoveFile(filename); - //printf("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays); + // printf("file:%s is removed, days:%d keepDays:%d", filename, days, keepDays); } else { - //printf("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays); + // printf("file:%s won't be removed, days:%d keepDays:%d", filename, days, keepDays); } } } @@ -187,7 +186,7 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) { int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) { wordexp_t full_path; if (0 != wordexp(dirname, &full_path, 0)) { - //printf("failed to expand path:%s since %s", dirname, strerror(errno)); + // printf("failed to expand path:%s since %s", dirname, strerror(errno)); wordfree(&full_path); return -1; } @@ -228,9 +227,9 @@ bool taosIsDir(const char *dirname) { return false; } -char* taosDirName(char *name) { +char *taosDirName(char *name) { #ifdef WINDOWS - char Drive1[MAX_PATH], Dir1[MAX_PATH]; + char Drive1[MAX_PATH], Dir1[MAX_PATH]; _splitpath(name, Drive1, Dir1, NULL, NULL); size_t dirNameLen = strlen(Drive1) + strlen(Dir1); if (dirNameLen > 0) { @@ -242,13 +241,13 @@ char* taosDirName(char *name) { #endif } -char* taosDirEntryBaseName(char *name) { +char *taosDirEntryBaseName(char 
*name) { #ifdef WINDOWS char Filename1[MAX_PATH], Ext1[MAX_PATH]; _splitpath(name, NULL, NULL, Filename1, Ext1); return name + (strlen(name) - strlen(Filename1) - strlen(Ext1)); #else - return (char*)basename(name); + return (char *)basename(name); #endif } @@ -258,8 +257,8 @@ TdDirPtr taosOpenDir(const char *dirname) { } #ifdef WINDOWS - char szFind[MAX_PATH]; // the pattern to search for - HANDLE hFind; + char szFind[MAX_PATH]; // the pattern to search for + HANDLE hFind; TdDirPtr pDir = taosMemoryMalloc(sizeof(TdDir)); @@ -275,7 +274,6 @@ TdDirPtr taosOpenDir(const char *dirname) { #else return (TdDirPtr)opendir(dirname); #endif - } TdDirEntryPtr taosReadDir(TdDirPtr pDir) { @@ -286,9 +284,9 @@ TdDirEntryPtr taosReadDir(TdDirPtr pDir) { if (!FindNextFile(pDir->hFind, &(pDir->dirEntry.findFileData))) { return NULL; } - return (TdDirEntryPtr)&(pDir->dirEntry.findFileData); + return (TdDirEntryPtr) & (pDir->dirEntry.findFileData); #else - return (TdDirEntryPtr)readdir((DIR*)pDir); + return (TdDirEntryPtr)readdir((DIR *)pDir); #endif } @@ -299,18 +297,18 @@ bool taosDirEntryIsDir(TdDirEntryPtr pDirEntry) { #ifdef WINDOWS return (pDirEntry->findFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0; #else - return (((dirent*)pDirEntry)->d_type & DT_DIR) != 0; + return (((dirent *)pDirEntry)->d_type & DT_DIR) != 0; #endif } -char* taosGetDirEntryName(TdDirEntryPtr pDirEntry) { +char *taosGetDirEntryName(TdDirEntryPtr pDirEntry) { if (pDirEntry == NULL) { return NULL; } #ifdef WINDOWS return pDirEntry->findFileData.cFileName; #else - return ((dirent*)pDirEntry)->d_name; + return ((dirent *)pDirEntry)->d_name; #endif } @@ -324,7 +322,7 @@ int32_t taosCloseDir(TdDirPtr *ppDir) { *ppDir = NULL; return 0; #else - closedir((DIR*)*ppDir); + closedir((DIR *)*ppDir); *ppDir = NULL; return 0; #endif diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c index f3da490f36d7bedd1c55420970674c269ce4d13c..9e29f44ed31c5a4943e85a7ebdcdffd3483965ec 100644 --- a/source/os/src/osSocket.c +++ b/source/os/src/osSocket.c @@ -28,6 +28,7 @@ #else #include #include +#include #include #include #include @@ -638,6 +639,73 @@ int32_t taosKeepTcpAlive(TdSocketPtr pSocket) { return 0; } +int taosGetLocalIp(const char *eth, char *ip) { +#if defined(WINDOWS) + // DO NOTHING + return 0; +#else + int fd; + struct ifreq ifr; + struct sockaddr_in sin; + + fd = socket(AF_INET, SOCK_DGRAM, 0); + if (-1 == fd) { + return -1; + } + strncpy(ifr.ifr_name, eth, IFNAMSIZ); + ifr.ifr_name[IFNAMSIZ - 1] = 0; + + if (ioctl(fd, SIOCGIFADDR, &ifr) < 0) { + taosCloseSocketNoCheck1(fd); + return -1; + } + memcpy(&sin, &ifr.ifr_addr, sizeof(sin)); + snprintf(ip, 64, "%s", inet_ntoa(sin.sin_addr)); + taosCloseSocketNoCheck1(fd); +#endif + return 0; +} +int taosValidIp(uint32_t ip) { +#if defined(WINDOWS) + // DO NOTHING + return 0; +#else + int ret = -1; + int fd; + + struct ifconf ifconf; + + char buf[512] = {0}; + ifconf.ifc_len = 512; + ifconf.ifc_buf = buf; + + if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) { + return -1; + } + + ioctl(fd, SIOCGIFCONF, &ifconf); + struct ifreq *ifreq = (struct ifreq *)ifconf.ifc_buf; + for (int i = (ifconf.ifc_len / sizeof(struct ifreq)); i > 0; i--) { + char ip_str[64] = {0}; + if (ifreq->ifr_flags == AF_INET) { + ret = taosGetLocalIp(ifreq->ifr_name, ip_str); + if (ret != 0) { + break; + } + ret = -1; + if (ip == (uint32_t)taosInetAddr(ip_str)) { + ret = 0; + break; + } + ifreq++; + } + } + taosCloseSocketNoCheck1(fd); + return ret; +#endif + return 0; +} +
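The new taosGetLocalIp/taosValidIp pair resolves an interface name to its IPv4 address via the SIOCGIFADDR ioctl, then checks a candidate address against every local interface. For poking at this behavior on a host without rebuilding taosd, a rough Linux-only Python sketch of the same ioctl flow; 0x8915 is SIOCGIFADDR from the standard Linux headers, the interface name is a placeholder, and none of this is TDengine API:

import fcntl
import socket
import struct

def get_local_ip(ifname):
    # Same ioctl the C code issues: SIOCGIFADDR (0x8915) on a throwaway UDP socket.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        ifreq = struct.pack('256s', ifname.encode()[:15])      # struct ifreq; name field is IFNAMSIZ bytes
        addr = fcntl.ioctl(s.fileno(), 0x8915, ifreq)[20:24]   # sockaddr_in.sin_addr starts at offset 20
        return socket.inet_ntoa(addr)
    finally:
        s.close()

print(get_local_ip('eth0'))  # e.g. '192.168.1.10'; 'eth0' is an assumption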
bool taosValidIpAndPort(uint32_t ip, uint16_t port) { struct sockaddr_in serverAdd; SocketFd fd; @@ -677,13 +745,8 @@ bool taosValidIpAndPort(uint32_t ip, uint16_t port) { taosCloseSocket(&pSocket); return false; } - if (listen(pSocket->fd, 1024) < 0) { - // printf("listen tcp server socket failed, 0x%x:%hu(%s)", ip, port, strerror(errno)); - taosCloseSocket(&pSocket); - return false; - } taosCloseSocket(&pSocket); - return true; + return taosValidIp(ip) == 0; } TdSocketServerPtr taosOpenTcpServerSocket(uint32_t ip, uint16_t port) { struct sockaddr_in serverAdd; diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 8cce169d9976d4f2ebbfa7613125e939232926b7..f2b9e0caabb3bc9fc7d6f6ddb5e795e7eeb5db6c 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -62,6 +62,8 @@ # ---- tstream ./test.sh -f tsim/tstream/basic0.sim +# ---- transaction + ./test.sh -f tsim/trans/create_db.sim # ---- tmq ./test.sh -f tsim/tmq/basic1.sim diff --git a/tests/script/tsim/trans/create_db.sim b/tests/script/tsim/trans/create_db.sim new file mode 100644 index 0000000000000000000000000000000000000000..0db5add88aeb6ea217cfe932ab3600398d3dd886 --- /dev/null +++ b/tests/script/tsim/trans/create_db.sim @@ -0,0 +1,166 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/cfg.sh -n dnode1 -c transPullupInterval -v 1 +system sh/cfg.sh -n dnode2 -c transPullupInterval -v 1 +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +sql connect + +print =============== show dnodes +sql show dnodes; +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +sql show mnodes; +if $rows != 1 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data02 != LEADER then + return -1 +endi + +print =============== create dnodes +sql create dnode $hostname port 7200 +sleep 2000 + +sql show dnodes; +if $rows != 2 then + return -1 +endi + +if $data00 != 1 then + return -1 +endi + +if $data10 != 2 then + return -1 +endi + +print =============== kill dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT + +print =============== create database +sql show transactions +if $rows != 0 then + return -1 +endi + +sql_error create database d1 vgroups 2; + +print =============== show transactions +sql show transactions +if $rows != 1 then + return -1 +endi + +if $data[0][0] != 2 then + return -1 +endi + +if $data[0][2] != undoAction then + return -1 +endi + +if $data[0][3] != d1 then + return -1 +endi + +if $data[0][4] != create-db then + return -1 +endi + +if $data[0][7] != @Unable to establish connection@ then + return -1 +endi + +sql_error create database d1 vgroups 2; + +print =============== start dnode2 +system sh/exec.sh -n dnode2 -s start +sleep 3000 + +sql show transactions +if $rows != 0 then + return -1 +endi + +sql create database d1 vgroups 2; + +print =============== kill dnode2 +system sh/exec.sh -n dnode2 -s stop -x SIGINT + +print =============== create database +sql show transactions +if $rows != 0 then + return -1 +endi + +sql_error create database d2 vgroups 2; + +print =============== show transactions +sql show transactions +if $rows != 1 then + return -1 +endi + +if $data[0][0] != 4 then + return -1 +endi + +if $data[0][2] != undoAction then + return -1 +endi + +if $data[0][3] != d2 then + return -1 +endi + +if $data[0][4] != create-db then + return -1 +endi + +if $data[0][7] != @Unable to establish connection@ then + return -1 +endi + +sql_error create database d2 vgroups 2; + +print =============== kill 
transaction +sql kill transaction 4; +sleep 2000 + +sql show transactions +if $rows != 0 then + return -1 +endi + +print =============== start dnode2 +system sh/exec.sh -n dnode2 -s start +sleep 3000 + +sql show transactions +if $rows != 0 then + return -1 +endi + +sql create database d2 vgroups 2; +sql_error kill transaction 1; +sql_error kill transaction 2; +sql_error kill transaction 3; +sql_error kill transaction 4; +sql_error kill transaction 5; + +return +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file
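The sim script above strands a create-db transaction by stopping dnode2, inspects it through show transactions, and then kills it. The same flow can be driven from the Python harness used by the system-test cases in this PR; a hedged sketch, assuming the tdSql wrapper from util.sql and a cluster whose second dnode is already stopped (the transaction id 2 mirrors the script and would normally be read from column 0 of the result):

# Sketch only -- a Python mirror of create_db.sim's failed-create flow,
# not an added test file.
tdSql.error("create database d1 vgroups 2")   # fails while dnode2 is down
tdSql.query("show transactions")
tdSql.checkRows(1)
tdSql.checkData(0, 2, "undoAction")           # stage column
tdSql.checkData(0, 4, "create-db")            # type column
tdSql.execute("kill transaction 2")           # id taken from column 0 in practice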
infoDict["cluster_info"]["mnodes"][0] or v != infoDict["cluster_info"]["mnodes"][0][k] : + tdLog.exit("mnodes info is null!") + + # vgroup_infos ==================================== + + if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None: + tdLog.exit("vgroup_infos is null!") + + vgroup_infos_nums = len(infoDict["vgroup_infos"]) + + for index in range(vgroup_infos_nums): + if "vgroup_id" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vgroup_id"]<0: + tdLog.exit("vgroup_id is null!") + if "database_name" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["database_name"]) < 0: + tdLog.exit("database_name is null!") + if "tables_num" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["tables_num"]!= 0: + tdLog.exit("tables_num is null!") + if "status" not in infoDict["vgroup_infos"][index] or len(infoDict["vgroup_infos"][index]["status"]) < 0 : + tdLog.exit("status is null!") + if "vnodes" not in infoDict["vgroup_infos"][index] or infoDict["vgroup_infos"][index]["vnodes"] ==None : + tdLog.exit("vnodes is null!") + if "dnode_id" not in infoDict["vgroup_infos"][index]["vnodes"][0] or infoDict["vgroup_infos"][index]["vnodes"][0]["dnode_id"] < 0 : + tdLog.exit("vnodes is null!") + + # grant_info ==================================== + + if "grant_info" not in infoDict or infoDict["grant_info"]== None: + tdLog.exit("grant_info is null!") + + if "expire_time" not in infoDict["grant_info"] or not infoDict["grant_info"]["expire_time"] > 0: + tdLog.exit("expire_time is null!") + + if "timeseries_used" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_used"] > 0: + tdLog.exit("timeseries_used is null!") + + if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0: + tdLog.exit("timeseries_total is null!") + + # dnode_info ==================================== + + if "dnode_info" not in infoDict or infoDict["dnode_info"]== None: + tdLog.exit("dnode_info is null!") + + dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine', + 'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select', + 'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success', + 'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode'] + for elem in dnode_infos: + if elem not in infoDict["dnode_info"] or infoDict["dnode_info"][elem] < 0: + tdLog.exit(f"{elem} is null!") + + # dnode_info ==================================== + + if "disk_infos" not in infoDict or infoDict["disk_infos"]== None: + tdLog.exit("disk_infos is null!") + + # bug for data_dir + if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 : + tdLog.exit("datadir is null!") + + if "name" not in infoDict["disk_infos"]["datadir"][0] or len(infoDict["disk_infos"]["datadir"][0]["name"]) <= 0: + tdLog.exit("name is null!") + + if "level" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["level"] < 0: + tdLog.exit("level is null!") + + if "avail" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["avail"] <= 0: + tdLog.exit("avail is null!") + + if "used" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["used"] <= 0: + tdLog.exit("used is 
null!") + + if "total" not in infoDict["disk_infos"]["datadir"][0] or infoDict["disk_infos"]["datadir"][0]["total"] <= 0: + tdLog.exit("total is null!") + + + if "logdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["logdir"]== None: + tdLog.exit("logdir is null!") + + if "name" not in infoDict["disk_infos"]["logdir"] or len(infoDict["disk_infos"]["logdir"]["name"]) <= 0: + tdLog.exit("name is null!") + + if "avail" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["avail"] <= 0: + tdLog.exit("avail is null!") + + if "used" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["used"] <= 0: + tdLog.exit("used is null!") + + if "total" not in infoDict["disk_infos"]["logdir"] or infoDict["disk_infos"]["logdir"]["total"] <= 0: + tdLog.exit("total is null!") + + + + if "tempdir" not in infoDict["disk_infos"] or infoDict["disk_infos"]["tempdir"]== None: + tdLog.exit("tempdir is null!") + + if "name" not in infoDict["disk_infos"]["tempdir"] or len(infoDict["disk_infos"]["tempdir"]["name"]) <= 0: + tdLog.exit("name is null!") + + if "avail" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["avail"] <= 0: + tdLog.exit("avail is null!") + + if "used" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["used"] <= 0: + tdLog.exit("used is null!") + + if "total" not in infoDict["disk_infos"]["tempdir"] or infoDict["disk_infos"]["tempdir"]["total"] <= 0: + tdLog.exit("total is null!") + + + # log_infos ==================================== + + if "log_infos" not in infoDict or infoDict["log_infos"]== None: + tdLog.exit("log_infos is null!") + + if "logs" not in infoDict["log_infos"] or len(infoDict["log_infos"]["logs"])!= 10: + tdLog.exit("logs is null!") + + if "ts" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 10: + tdLog.exit("ts is null!") + + if "level" not in infoDict["log_infos"]["logs"][0] or infoDict["log_infos"]["logs"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]: + tdLog.exit("level is null!") + + if "content" not in infoDict["log_infos"]["logs"][0] or len(infoDict["log_infos"]["logs"][0]["ts"]) <= 1: + tdLog.exit("content is null!") + + if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4: + tdLog.exit("summary is null!") + + + if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 : + tdLog.exit("total is null!") + + if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]: + tdLog.exit("level is null!") + +class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): + def do_GET(self): + """ + process GET request + """ + + def do_POST(self): + """ + process POST request + """ + contentEncoding = self.headers["Content-Encoding"] + + if contentEncoding == 'gzip': + req_body = self.rfile.read(int(self.headers["Content-Length"])) + plainText = gzip.decompress(req_body).decode() + else: + plainText = self.rfile.read(int(self.headers["Content-Length"])).decode() + + print(plainText) + # 1. send response code and header + self.send_response(200) + self.send_header("Content-Type", "text/html; charset=utf-8") + self.end_headers() + + # 2. send response content + #self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8")) + + # 3. 
check request body info + infoDict = json.loads(plainText) + #print("================") + # print(infoDict) + telemetryInfoCheck(infoDict) + + # 4. shutdown the server and exit case + assassin = threading.Thread(target=httpServer.shutdown) + assassin.daemon = True + assassin.start() + print ("==== shutdown http server ====") + +class TDTestCase: + hostname = socket.gethostname() + serverPort = '7080' + rpcDebugFlagVal = '143' + clientCfgDict = {'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + clientCfgDict["serverPort"] = serverPort + clientCfgDict["firstEp"] = hostname + ':' + serverPort + clientCfgDict["secondEp"] = hostname + ':' + serverPort + clientCfgDict["rpcDebugFlag"] = rpcDebugFlagVal + clientCfgDict["fqdn"] = hostname + + updatecfgDict = {'clientCfg': {}, 'serverPort': '', 'firstEp': '', 'secondEp':'', 'rpcDebugFlag':'135', 'fqdn':''} + updatecfgDict["clientCfg"] = clientCfgDict + updatecfgDict["serverPort"] = serverPort + updatecfgDict["firstEp"] = hostname + ':' + serverPort + updatecfgDict["secondEp"] = hostname + ':' + serverPort + updatecfgDict["fqdn"] = hostname + + updatecfgDict["monitorFqdn"] = hostname + updatecfgDict["monitorPort"] = '6043' + updatecfgDict["monitor"] = '1' + updatecfgDict["monitorInterval"] = "5" + updatecfgDict["monitorMaxLogs"] = "10" + updatecfgDict["monitorComp"] = "1" + + print ("===================: ", updatecfgDict) + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring + tdSql.prepare() + # time.sleep(2) + vgroups = "30" + sql = "create database db3 vgroups " + vgroups + tdSql.query(sql) + + # loop to wait request + httpServer.serve_forever() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +# create http server: bind ip/port , and request processor +serverAddress = ("", int(telemetryPort)) +httpServer = http.server.HTTPServer(serverAddress, RequestHandlerImpl) + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) + + + + +
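taosdMonitor.py embeds an HTTP server to capture what taosd reports, and shuts that server down from inside the request handler. One subtlety worth noting: HTTPServer.shutdown() blocks until serve_forever() returns, so calling it directly on the serving thread would deadlock, which is why do_POST hands it to a daemon thread. The pattern in isolation, stdlib only and independent of the test framework:

import http.server
import threading

class OneShotHandler(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        self.rfile.read(int(self.headers.get("Content-Length", 0)))
        self.send_response(200)
        self.end_headers()
        # shutdown() waits for serve_forever() to exit, so it must not run
        # on this (the serving) thread -- hand it to a daemon thread instead.
        threading.Thread(target=self.server.shutdown, daemon=True).start()

server = http.server.HTTPServer(("", 6043), OneShotHandler)
server.serve_forever()  # returns once the first POST has been handled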
[OK]") + + # stop def stop(self): tdSql.close() @@ -115,11 +121,12 @@ class TDTestCase: return cur def new_create_tables(self,dbname,vgroups,stbname,tcountStart,tcountStop): - host = "chenhaoran02" + host = "localhost" buildPath = self.getBuildPath() config = buildPath+ "../sim/dnode1/cfg/" tsql=self.newcur(host,config) + tsql.execute("drop database if exists %s"%dbname) tsql.execute("create database %s vgroups %d"%(dbname,vgroups)) tsql.execute("use %s" %dbname) tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname) @@ -182,7 +189,52 @@ class TDTestCase: tdLog.debug("INSERT TABLE DATA ............ [OK]") return + def taosBench(self,jsonFile): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + taosBenchbin = buildPath+ "/build/bin/taosBenchmark" + os.system("%s -f %s -y " %(taosBenchbin,jsonFile)) + + return + def taosBenchCreate(self,dbname,stbname,vgroups,threadNumbers,count): + # count=50000 + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + taosBenchbin = buildPath+ "/build/bin/taosBenchmark" + + # insert: create one or mutiple tables per sql and insert multiple rows per sql + tdSql.execute("drop database if exists %s"%dbname) + tdSql.execute("create database %s vgroups %d"%(dbname,vgroups)) + tdSql.execute("use %s" %dbname) + + threads = [] + # threadNumbers=2 + for i in range(threadNumbers): + jsonfile="1-insert/Vgroups%d%d.json"%(vgroups,i) + os.system("cp -f 1-insert/manyVgroups.json %s"%(jsonfile)) + os.system("sed -i 's/\"name\": \"db\",/\"name\": \"%s%d\",/g' %s"%(dbname,i,jsonfile)) + os.system("sed -i 's/\"childtable_count\": 300000,/\"childtable_count\": %d,/g' %s "%(count,jsonfile)) + os.system("sed -i 's/\"name\": \"stb1\",/\"name\": \"%s%d\",/g' %s "%(stbname,i,jsonfile)) + os.system("sed -i 's/\"childtable_prefix\": \"stb1_\",/\"childtable_prefix\": \"%s%d_\",/g' %s "%(stbname,i,jsonfile)) + threads.append(mp.Process(target=self.taosBench, args=("%s"%jsonfile,))) + start_time = time.time() + for tr in threads: + tr.start() + for tr in threads: + tr.join() + end_time = time.time() + + spendTime=end_time-start_time + speedCreate=count/spendTime + tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... 
[OK]"% (spendTime,count,speedCreate)) + return # test case1 base def test_case1(self): tdLog.debug("-----create database and tables test------- ") @@ -284,6 +336,12 @@ class TDTestCase: return + def test_case3(self): + + self.taosBenchCreate("db1", "stb1", 1, 2, 1*50000) + + return + # # add case with filename # diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json new file mode 100644 index 0000000000000000000000000000000000000000..df6f1163e84a6c9e62464b276e3a51902fbe9aee --- /dev/null +++ b/tests/system-test/1-insert/manyVgroups.json @@ -0,0 +1,76 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos/", + "host": "test216", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 8, + "thread_count_create_tbl": 8, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "interlace_rows": 100000, + "num_of_records_per_req": 100000, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 1 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 300000, + "childtable_prefix": "stb1_", + "auto_create_table": "no", + "batch_create_tbl_num": 50000, + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 0, + "interlace_rows": 0, + "insert_interval": 0, + "max_sql_len": 10000000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 10, + "sample_format": "csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "DOUBLE", + "count": 100 + }, + { + "type": "BINARY", + "len": 400, + "count": 10 + }, + { + "type": "nchar", + "len": 200, + "count": 20 + } + ], + "tags": [ + { + "type": "TINYINT", + "count": 2 + }, + { + "type": "BINARY", + "len": 16, + "count": 2 + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/system-test/1-insert/mutipythonnodebugtaosd.py b/tests/system-test/1-insert/mutipythonnodebugtaosd.py new file mode 100644 index 0000000000000000000000000000000000000000..73d70b43488b57345a822f971190f4e62212f865 --- /dev/null +++ b/tests/system-test/1-insert/mutipythonnodebugtaosd.py @@ -0,0 +1,299 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + + +import sys +import os +selfPath = os.path.dirname(os.path.realpath(__file__)) +utilPath="%s/../../pytest/"%selfPath +import threading +import multiprocessing as mp +from numpy.lib.function_base import insert +import taos +sys.path.append(utilPath) +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np +import datetime as dt +import time +# constant define +WAITS = 5 # wait seconds + +class TDTestCase: + # + # --------------- main frame ------------------- + # + + def caseDescription(self): + ''' + limit and offset keyword function test cases; + case1: limit offset base function test + case2: offset return valid + ''' + return + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root)-len("/build/bin")] + break + return buildPath + + # init + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + # tdSql.init(conn.cursor()) + # tdSql.prepare() + # self.create_tables(); + self.ts = 1500000000000 + + + # run case + def run(self): + + # test base case + self.test_case1() + tdLog.debug(" LIMIT test_case1 ............ [OK]") + + # test advance case + # self.test_case2() + # tdLog.debug(" LIMIT test_case2 ............ [OK]") + + # stop + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + # --------------- case ------------------- + + # create tables + def create_tables(self,dbname,stbname,count): + tdSql.execute("use %s" %dbname) + tdSql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname) + pre_create = "create table" + sql = pre_create + tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + # print(time.time()) + exeStartTime=time.time() + for i in range(count): + sql += " %s_%d using %s tags(%d)"%(stbname,i,stbname,i+1) + if i >0 and i%3000 == 0: + tdSql.execute(sql) + sql = pre_create + # print(time.time()) + # end sql + if sql != pre_create: + tdSql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedCreate=count/spendTime + tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... 
[OK]"% (spendTime,count,speedCreate)) + return + + def newcur(self,host,cfg): + user = "root" + password = "taosdata" + port =6030 + con=taos.connect(host=host, user=user, password=password, config=cfg ,port=port) + cur=con.cursor() + print(cur) + return cur + + def new_create_tables(self,dbname,vgroups,stbname,tcountStart,tcountStop): + host = "127.0.0.1" + buildPath = self.getBuildPath() + config = buildPath+ "../sim/dnode1/cfg/" + + tsql=self.newcur(host,config) + tsql.execute("drop database if exists %s" %(dbname)) + tsql.execute("create database if not exists %s vgroups %d"%(dbname,vgroups)) + tsql.execute("use %s" %dbname) + tsql.execute("create stable %s(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)"%stbname) + + pre_create = "create table" + sql = pre_create + tcountStop=int(tcountStop) + tcountStart=int(tcountStart) + count=tcountStop-tcountStart + + tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname)) + # print(time.time()) + exeStartTime=time.time() + # print(type(tcountStop),type(tcountStart)) + for i in range(tcountStart,tcountStop): + sql += " %s_%d using %s tags(%d)"%(stbname,i,stbname,i+1) + if i >0 and i%20000 == 0: + # print(sql) + tsql.execute(sql) + sql = pre_create + # print(time.time()) + # end sql + if sql != pre_create: + # print(sql) + tsql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedCreate=count/spendTime + # tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) + return + + + + # insert data + def insert_data(self, dbname, stbname, ts_start, tcountStart,tcountStop,rowCount): + tdSql.execute("use %s" %dbname) + pre_insert = "insert into " + sql = pre_insert + tcount=tcountStop-tcountStart + allRows=tcount*rowCount + tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbname, allRows)) + exeStartTime=time.time() + for i in range(tcountStart,tcountStop): + sql += " %s_%d values "%(stbname,i) + for j in range(rowCount): + sql += "(%d, %d, 'taos_%d') "%(ts_start + j*1000, j, j) + if j >0 and j%5000 == 0: + # print(sql) + tdSql.execute(sql) + sql = "insert into %s_%d values " %(stbname,i) + # end sql + if sql != pre_insert: + # print(sql) + tdSql.execute(sql) + exeEndTime=time.time() + spendTime=exeEndTime-exeStartTime + speedInsert=allRows/spendTime + # tdLog.debug("spent %.2fs to INSERT %d rows , insert rate is %.2f rows/s... [OK]"% (spendTime,allRows,speedInsert)) + + tdLog.debug("INSERT TABLE DATA ............ 
[OK]") + return + + + # test case1 base + def test_case1(self): + tdLog.debug("-----create database and tables test------- ") + # tdSql.execute("drop database if exists db1") + # tdSql.execute("drop database if exists db4") + # tdSql.execute("drop database if exists db6") + # tdSql.execute("drop database if exists db8") + # tdSql.execute("drop database if exists db12") + # tdSql.execute("drop database if exists db16") + + #create database and tables; + + # tdSql.execute("create database db11 vgroups 1") + # # self.create_tables("db1", "stb1", 30*10000) + # tdSql.execute("use db1") + # tdSql.execute("create stable stb1(ts timestamp, c1 int, c2 binary(10)) tags(t1 int)") + + # tdSql.execute("create database db12 vgroups 1") + # # self.create_tables("db1", "stb1", 30*10000) + # tdSql.execute("use db1") + + # t1 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(1,)) + # t2 = threading.Thread(target=self.new_create_tables("db1", "stb1", 15*10000), args=(2,)) + # t1 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", 0,count/2,)) + # t2 = mp.Process(target=self.new_create_tables, args=("db1", "stb1", count/2,count,)) + + count=500000 + vgroups=1 + threads = [] + threadNumbers=2 + for i in range(threadNumbers): + threads.append(mp.Process(target=self.new_create_tables, args=("db1%d"%i, vgroups, "stb1", 0,count,))) + start_time = time.time() + for tr in threads: + tr.start() + for tr in threads: + tr.join() + end_time = time.time() + spendTime=end_time-start_time + speedCreate=count/spendTime + tdLog.debug("spent %.2fs to create 1 stable and %d table, create speed is %.2f table/s... [OK]"% (spendTime,count,speedCreate)) + # self.new_create_tables("db1", "stb1", 15*10000) + # self.new_create_tables("db1", "stb1", 15*10000) + + # tdSql.execute("create database db4 vgroups 4") + # self.create_tables("db4", "stb4", 30*10000) + + # tdSql.execute("create database db6 vgroups 6") + # self.create_tables("db6", "stb6", 30*10000) + + # tdSql.execute("create database db8 vgroups 8") + # self.create_tables("db8", "stb8", 30*10000) + + # tdSql.execute("create database db12 vgroups 12") + # self.create_tables("db12", "stb12", 30*10000) + + # tdSql.execute("create database db16 vgroups 16") + # self.create_tables("db16", "stb16", 30*10000) + return + + # test case2 base:insert data + def test_case2(self): + + tdLog.debug("-----insert data test------- ") + # drop database + tdSql.execute("drop database if exists db1") + tdSql.execute("drop database if exists db4") + tdSql.execute("drop database if exists db6") + tdSql.execute("drop database if exists db8") + tdSql.execute("drop database if exists db12") + tdSql.execute("drop database if exists db16") + + #create database and tables; + + tdSql.execute("create database db1 vgroups 1") + self.create_tables("db1", "stb1", 1*100) + self.insert_data("db1", "stb1", self.ts, 1*50,1*10000) + + + tdSql.execute("create database db4 vgroups 4") + self.create_tables("db4", "stb4", 1*100) + self.insert_data("db4", "stb4", self.ts, 1*100,1*10000) + + tdSql.execute("create database db6 vgroups 6") + self.create_tables("db6", "stb6", 1*100) + self.insert_data("db6", "stb6", self.ts, 1*100,1*10000) + + tdSql.execute("create database db8 vgroups 8") + self.create_tables("db8", "stb8", 1*100) + self.insert_data("db8", "stb8", self.ts, 1*100,1*10000) + + tdSql.execute("create database db12 vgroups 12") + self.create_tables("db12", "stb12", 1*100) + self.insert_data("db12", "stb12", self.ts, 1*100,1*10000) + + tdSql.execute("create database 
db16 vgroups 16") + self.create_tables("db16", "stb16", 1*100) + self.insert_data("db16", "stb16", self.ts, 1*100,1*10000) + + return + +# +# add case with filename +# +# tdCases.addWindows(__file__, TDTestCase()) +# tdCases.addLinux(__file__, TDTestCase()) +case=TDTestCase() +case.test_case1() \ No newline at end of file diff --git a/tests/system-test/2-query/cast.py b/tests/system-test/2-query/cast.py index 0e849410b7b6ca29a739c4309d6123d5c9508ede..e07ab95f456be87536b65a96af63207426c140f0 100644 --- a/tests/system-test/2-query/cast.py +++ b/tests/system-test/2-query/cast.py @@ -20,7 +20,7 @@ class TDTestCase: __sql = f"select cast({col_name} as bigint), {col_name} from {tbname}" tdSql.query(sql=__sql) data_tb_col = [result[1] for result in tdSql.queryResult] - for i in range(len(tdSql.queryRows)): + for i in range(tdSql.queryRows): tdSql.checkData( i, 0, None ) if data_tb_col[i] is None else tdSql.checkData( i, 0, int(data_tb_col[i]) ) def __range_to_bigint(self,cols,tables): @@ -32,7 +32,7 @@ class TDTestCase: __sql = f"select cast({col_name} as timestamp), {col_name} from {tbname}" tdSql.query(sql=__sql) data_tb_col = [result[1] for result in tdSql.queryResult] - for i in range(len(tdSql.queryRows)): + for i in range(tdSql.queryRows): if data_tb_col[i] is None: tdSql.checkData( i, 0 , None ) if col_name not in ["c2", "double"] or tbname != "t1" or i != 10: @@ -597,37 +597,37 @@ class TDTestCase: tdLog.printNoPrefix("==========step39: cast constant operation to bigint, expect change to int ") tdSql.query("select cast(12121.23323131 as bigint) as b from ct4") - ( tdSql.checkData(i, 0, 12121) for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as binary(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as binary(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as nchar(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 as nchar(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as bigint) as b from ct4") - ( tdSql.checkData(i, 0, 12443) for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, 12443) for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12443.110129') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as binary(3)) as b from ct4") - ( tdSql.checkData(i, 0, '124') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 321.876897998 as nchar(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12443.110129') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12443.110129') for i in range(tdSql.queryRows) ) tdSql.query("select 
cast(12121.23323131 + 321.876897998 as nchar(3)) as b from ct4") - ( tdSql.checkData(i, 0, '124') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '124') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as bigint) as b from ct4") - ( tdSql.checkData(i, 0, 12121) for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, 12121) for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows)) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as binary(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(16)) as b from ct4") - ( tdSql.checkData(i, 0, '12121.233231') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12121.233231') for i in range(tdSql.queryRows) ) tdSql.query("select cast(12121.23323131 + 'test~!@`#$%^&*()}{][;><.,' as nchar(2)) as b from ct4") - ( tdSql.checkData(i, 0, '12') for i in range(len(tdSql.queryRows) ) ) + ( tdSql.checkData(i, 0, '12') for i in range(tdSql.queryRows) ) tdLog.printNoPrefix("==========step40: error cast condition, should return error ") tdSql.error("select cast(c1 as int) as b from ct4") diff --git a/tests/system-test/2-query/char_length.py b/tests/system-test/2-query/char_length.py index e78db3b8b010a08329545af0e927fc946bb9ae33..97d5a5f59a449ed4a923e713d190ce5b679fe5b7 100644 --- a/tests/system-test/2-query/char_length.py +++ b/tests/system-test/2-query/char_length.py @@ -232,13 +232,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py new file mode 100644 index 0000000000000000000000000000000000000000..1167b444d2eb6f753a5d662586afb0dfe30dff0b --- /dev/null +++ b/tests/system-test/2-query/concat.py @@ -0,0 +1,287 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __concat_condition(self): # sourcery skip: extract-method + concat_condition = [] + for char_col in CHAR_COL: + concat_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + concat_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + concat_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col 
in NUM_COL ) + concat_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # concat_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + concat_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + concat_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + concat_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + concat_condition.append('''"test1234!@#$%^&*():'> 0 " + return "" + + def __concat_num(self, concat_lists, num): + return [ concat_lists[i] for i in range(num) ] + + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __concat_check(self, tbname, num): + concat_condition = self.__concat_condition() + for i in range(len(concat_condition) - num + 1 ): + condition = self.__concat_num(concat_condition[i:], num) + concat_filter = f"concat( {','.join( condition ) }) " + where_condition = self.__where_condition(condition[0]) + # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " ) + concat_group_having = self.__group_condition(concat_filter, having=f"{concat_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_group_no_having= self.__group_condition(concat_filter) + groups = ["", concat_group_having, concat_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_data = [] + for m in range(rows): + concat_data.append("".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_data.append(None) + tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert tdSql.getData(j, 0) in concat_data + + [ tdSql.query(f"select concat( {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat( {char_col} ) from {tbname} ", + f"select concat(ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat( {char_col} ) ", + ) + ) + + sqls.extend( f"select concat( {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat( {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat( {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat( {num_col} , {bool_col} ) from {tbname} " for num_col 
in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat( {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select concat( {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat( {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat({char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat() from {tbname} ", + f"select concat(*) from {tbname} ", + f"select concat(ccccccc) from {tbname} ", + f"select concat(111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + for i in range(2,8): + self.__concat_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__concat_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_check(tb,1) + self.__concat_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, 
{-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all 
check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/concat_ws.py b/tests/system-test/2-query/concat_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..876a1c88055b0ab3ca3b1046d180365fc089ae0d --- /dev/null +++ b/tests/system-test/2-query/concat_ws.py @@ -0,0 +1,287 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __concat_ws_condition(self): # sourcery skip: extract-method + concat_ws_condition = [] + for char_col in CHAR_COL: + concat_ws_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + concat_ws_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + concat_ws_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + concat_ws_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_ws_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_ws_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # concat_ws_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + concat_ws_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + concat_ws_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + concat_ws_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + concat_ws_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + concat_ws_condition.append('''"test1234!@#$%^&*():'> 0 " + return "" + + def __concat_ws_num(self, concat_ws_lists, num): + return [ concat_ws_lists[i] for i in range(num) ] + + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __concat_ws_check(self, tbname, num): + concat_ws_condition = self.__concat_ws_condition() + for i in range(len(concat_ws_condition) - num + 1 ): + condition = self.__concat_ws_num(concat_ws_condition[i:], num) + concat_ws_filter = f"concat_ws('_', {','.join( condition ) }) " + where_condition = self.__where_condition(condition[0]) + # group_having = self.__group_condition(condition[0], having=f"{condition[0]} is not null " ) + concat_ws_group_having = self.__group_condition(concat_ws_filter, having=f"{concat_ws_filter} is not null " ) + # group_no_having= self.__group_condition(condition[0] ) + concat_ws_group_no_having= self.__group_condition(concat_ws_filter) + groups = ["", concat_ws_group_having, concat_ws_group_no_having] + + if num > 8 or num < 2 : + [tdSql.error(f"select 
concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + break + + tdSql.query(f"select {','.join(condition)} from {tbname} ") + rows = tdSql.queryRows + concat_ws_data = [] + for m in range(rows): + concat_ws_data.append("_".join(tdSql.queryResult[m])) if tdSql.getData(m, 0) else concat_ws_data.append(None) + tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} ") + tdSql.checkRows(rows) + for j in range(tdSql.queryRows): + assert tdSql.getData(j, 0) in concat_ws_data + + [ tdSql.query(f"select concat_ws('_', {','.join( condition ) }) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __concat_ws_err_check(self,tbname): + sqls = [] + + for char_col in CHAR_COL: + sqls.extend( + ( + f"select concat_ws('_', {char_col} ) from {tbname} ", + f"select concat_ws('_', ceil( {char_col} )) from {tbname} ", + f"select {char_col} from {tbname} group by concat_ws('_', {char_col} ) ", + ) + ) + + sqls.extend( f"select concat_ws('_', {char_col} , {num_col} ) from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {char_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {char_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {ts_col}, {bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {num_col} , {ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select concat_ws('_', {num_col} , {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select concat_ws('_', {num_col} , {num_col} ) from {tbname} " for num_col in NUM_COL for num_col in NUM_COL) + sqls.extend( f"select concat_ws('_', {ts_col}, {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select concat_ws('_', {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {char_col}, 11) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select concat_ws('_', {num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select concat_ws('_', {ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select concat_ws('_', {bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select concat_ws('_', {char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( + ( + f"select concat_ws('_', ) from {tbname} ", + f"select concat_ws('_', *) from {tbname} ", + f"select concat_ws('_', ccccccc) from {tbname} ", + f"select concat_ws('_', 111) from {tbname} ", + ) + ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + for i in range(2,8): + self.__concat_ws_check(tb,i) + tdLog.printNoPrefix(f"==========current sql condition check in {tb}, col num: {i} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for 
tb in tbname: + for errsql in self.__concat_ws_err_check(tb): + tdSql.error(sql=errsql) + self.__concat_ws_check(tb,1) + self.__concat_ws_check(tb,9) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 
86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py index a39bc219466a38eb43290a2ee668a8b5c3e2d232..289dd3d62df0b38fc3d6d857ba9bcd22bfd14aae 100644 --- a/tests/system-test/2-query/join.py +++ b/tests/system-test/2-query/join.py @@ -99,6 +99,7 @@ class TDTestCase: if not join_flag : tdSql.error(sql=sql) + break if len(tblist) == 2: if "ct1" in tblist or "t1" in tblist: self.__join_current(sql, checkrows) @@ -111,42 +112,9 @@ class TDTestCase: if len(tblist) > 2 or len(tblist) < 1: tdSql.error(sql=sql) - # def __join_err_check(self,tbname): - # sqls = [] - - # for un_char_col in NUM_COL: - # sqls.extend( - # ( - # f"select length( {un_char_col} ) from {tbname} ", - # f"select length(ceil( {un_char_col} )) from {tbname} ", - # f"select {un_char_col} from {tbname} group by length( {un_char_col} ) ", - # ) - # ) - - # sqls.extend( f"select length( {un_char_col} + {un_char_col_2} ) from {tbname} " for un_char_col_2 in NUM_COL ) - # sqls.extend( f"select length( {un_char_col} + {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) - - # sqls.extend( f"select {char_col} from {tbname} group by length( {char_col} ) " for char_col in CHAR_COL) - # sqls.extend( f"select length( {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) - # sqls.extend( f"select length( {char_col} + {ts_col} ) from {tbname} " for char_col in NUM_COL for ts_col in TS_TYPE_COL) - # sqls.extend( f"select length( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) - # sqls.extend( f"select 
upper({char_col}, 11) from {tbname} " for char_col in CHAR_COL ) - # sqls.extend( f"select upper({char_col}) from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) - # sqls.extend( - # ( - # f"select length() from {tbname} ", - # f"select length(*) from {tbname} ", - # f"select length(ccccccc) from {tbname} ", - # f"select length(111) from {tbname} ", - # f"select length(c8, 11) from {tbname} ", - # ) - # ) - - # return sqls - def __join_current(self, sql, checkrows): tdSql.query(sql=sql) - tdSql.checkRows(checkrows) + # tdSql.checkRows(checkrows) def __test_current(self): @@ -197,10 +165,10 @@ class TDTestCase: tbname = ["ct1", "ct2", "ct4", "t1"] - for tb in tbname: - for errsql in self.__length_err_check(tb): - tdSql.error(sql=errsql) - tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + # for tb in tbname: + # for errsql in self.__join_err_check(tb): + # tdSql.error(sql=errsql) + # tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") def all_test(self): @@ -319,13 +287,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/length.py b/tests/system-test/2-query/length.py index 083bc62c9a8d0411b0db394f991e5fd01ad0474f..ed604c41ae351e9f03e51b4a6f77160cc463529c 100644 --- a/tests/system-test/2-query/length.py +++ b/tests/system-test/2-query/length.py @@ -233,13 +233,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py index 5445c37b8a0b5e08998371585f0e293c36378320..0917fb63fc638263849625aec5b907c05260f49f 100644 --- a/tests/system-test/2-query/lower.py +++ b/tests/system-test/2-query/lower.py @@ -59,12 +59,9 @@ class TDTestCase: groups = ["", group_having, group_no_having] for group_condition in groups: - tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ") - datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)] - lower_data = [ str(data).lower() if data else None for data in datas ] - tdSql.query(f"select lower( {condition} ) from {tbname} {where_condition} {group_condition}") - for i in range(len(lower_data)): - tdSql.checkData(i, 0, lower_data[i] ) if lower_data[i] else tdSql.checkData(i, 0, None) + tdSql.query(f"select lower( {condition} ), {condition} from {tbname} {where_condition} {group_condition}") + for i in range(tdSql.queryRows): + tdSql.checkData(i, 0, str(tdSql.getData(i, 1)).lower() ) if tdSql.getData(i, 1) else tdSql.checkData(i, 0, None) def __lower_err_check(self,tbname): sqls = [] @@ -230,13 +227,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + 
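# re-enter the database after the dnode restart so the repeated checks run against db again + 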
tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py new file mode 100644 index 0000000000000000000000000000000000000000..15f40a09c3db67e4324e75768532221f55f2e35f --- /dev/null +++ b/tests/system-test/2-query/ltrim.py @@ -0,0 +1,267 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __ltrim_condition(self): # sourcery skip: extract-method + ltrim_condition = [] + for char_col in CHAR_COL: + ltrim_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + ltrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + ltrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + ltrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL ) + ltrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + ltrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + ltrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # ltrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + ltrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + ltrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + ltrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + ltrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + ltrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + ltrim_condition.append(''' " test1234!@#$%^&*() :'> 0 " + return "" + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __ltrim_check(self, tbname): + ltrim_condition = self.__ltrim_condition() + for condition in ltrim_condition: + where_condition = self.__where_condition(condition) + ltrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " ) + ltrim_group_no_having= self.__group_condition(condition) + groups = ["", ltrim_group_having, ltrim_group_no_having] + + tdSql.query(f"select ltrim( {condition}) , {condition} from {tbname} ") + for j in range(tdSql.queryRows): + tdSql.checkData(j,0, tdSql.getData(j,1).lstrip()) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None) + + [ tdSql.query(f"select ltrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ] + + + def 
__ltrim_err_check(self,tbname): + sqls = [] + + for num_col in NUM_COL: + sqls.extend( ( f"select ltrim( {num_col} ) from {tbname} ", f"select ltrim(ceil( {num_col} )) from {tbname} ", f"select {num_col} from {tbname} group by ltrim( {num_col} ) ", ) ) + + sqls.extend( f"select ltrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select ltrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select ltrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select ltrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select ltrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select ltrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select ltrim( {num_col}+ {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL) + sqls.extend( f"select ltrim( {ts_col}+{ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL ) + sqls.extend( f"select ltrim( {bool_col}+ {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL ) + + sqls.extend( f"select ltrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select ltrim({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select ltrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select ltrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select ltrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( ( f"select ltrim() from {tbname} ", f"select ltrim(*) from {tbname} ", f"select ltrim(ccccccc) from {tbname} ", f"select ltrim(111) from {tbname} ", ) ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + self.__ltrim_check(tb) + tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__ltrim_err_check(tb): + tdSql.error(sql=errsql) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4):
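+ # four child tables ct1-ct4 of stb1, distinguished only by tag t1 + tdSql.execute(f'create table ct{i+1} 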
using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", 
"nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py new file mode 100644 index 0000000000000000000000000000000000000000..30624792cc33866a19c0ec1a31594cdfa438ffcf --- /dev/null +++ b/tests/system-test/2-query/rtrim.py @@ -0,0 +1,267 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def __rtrim_condition(self): # sourcery skip: extract-method + rtrim_condition = [] + for char_col in CHAR_COL: + rtrim_condition.extend( + ( + char_col, + f"upper( {char_col} )", + ) + ) + rtrim_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + rtrim_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + rtrim_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL ) + rtrim_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + rtrim_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + rtrim_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # rtrim_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + rtrim_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + rtrim_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL ) + + for num_col in NUM_COL: + rtrim_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + rtrim_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL ) + + rtrim_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + rtrim_condition.append(''' " test1234!@#$%^&*() :'> 0 " + return "" + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __rtrim_check(self, tbname): + rtrim_condition 
= self.__rtrim_condition() + for condition in rtrim_condition: + where_condition = self.__where_condition(condition) + rtrim_group_having = self.__group_condition(condition, having=f"{condition} is not null " ) + rtrim_group_no_having= self.__group_condition(condition) + groups = ["", rtrim_group_having, rtrim_group_no_having] + + tdSql.query(f"select rtrim( {condition}) , {condition} from {tbname} ") + for j in range(tdSql.queryRows): + tdSql.checkData(j,0, tdSql.getData(j,1).rstrip()) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None) + + [ tdSql.query(f"select rtrim({condition}) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __rtrim_err_check(self,tbname): + sqls = [] + + for num_col in NUM_COL: + sqls.extend( ( f"select rtrim( {num_col} ) from {tbname} ", f"select rtrim(ceil( {num_col} )) from {tbname} ", f"select {num_col} from {tbname} group by rtrim( {num_col} ) ", ) ) + + sqls.extend( f"select rtrim( {char_col} , {num_col} ) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select rtrim( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select rtrim( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select rtrim( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select rtrim( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select rtrim( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select rtrim( {num_col}+ {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL) + sqls.extend( f"select rtrim( {ts_col}+{ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL ) + sqls.extend( f"select rtrim( {bool_col}+ {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL ) + + sqls.extend( f"select rtrim( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select rtrim({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select rtrim({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select rtrim({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select rtrim({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( ( f"select rtrim() from {tbname} ", f"select rtrim(*) from {tbname} ", f"select rtrim(ccccccc) from {tbname} ", f"select rtrim(111) from {tbname} ", ) ) + + return sqls + + def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + self.__rtrim_check(tb) + tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__rtrim_err_check(tb): + tdSql.error(sql=errsql) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table")
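+ # stb1 and t1 carry one column of each supported type (c1-c10), so every cast/concat condition above has a matching source column + 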
create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 
127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py new file mode 100644 index 0000000000000000000000000000000000000000..e78606826b610223f5ac30f09560e8ec73800891 --- /dev/null +++ b/tests/system-test/2-query/substr.py @@ -0,0 +1,271 @@ +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +import datetime # explicit import: __insert_data below uses the datetime module and the star imports are not guaranteed to re-export it + + +PRIMARY_COL = "ts" + +INT_COL = "c1" +BINT_COL = "c2" +SINT_COL = "c3" +TINT_COL = "c4" +FLOAT_COL = "c5" +DOUBLE_COL = "c6" +BOOL_COL = "c7" + +BINARY_COL = "c8" +NCHAR_COL = "c9" +TS_COL = "c10" + +NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ] +CHAR_COL = [ BINARY_COL, NCHAR_COL, ] +BOOLEAN_COL = [ BOOL_COL, ] +TS_TYPE_COL = [ TS_COL, ] + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + + def __substr_condition(self): # sourcery skip: extract-method + substr_condition = [] + for char_col in CHAR_COL: + substr_condition.extend( ( char_col, f"upper( {char_col} )", ) ) + substr_condition.extend( f"cast( {num_col} as binary(16) ) " for num_col in NUM_COL) + substr_condition.extend( f"cast( {char_col} + {num_col} as binary(16) ) " for num_col in NUM_COL ) + substr_condition.extend( f"concat( cast( {char_col} + {num_col} as binary(16) ), {char_col}) " for num_col in NUM_COL ) + substr_condition.extend( f"cast( {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + substr_condition.extend( f"cast( {char_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + substr_condition.extend( f"cast( {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + # substr_condition.extend( f"cast( {char_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL ) + substr_condition.extend( f"cast( {char_col} + {char_col_2} as binary(16) ) " for char_col_2 in CHAR_COL ) + substr_condition.extend( f"concat( {char_col}, {char_col_2} ) " for char_col_2 in CHAR_COL 
) + + for num_col in NUM_COL: + substr_condition.extend( f"cast( {num_col} + {bool_col} as binary(16) )" for bool_col in BOOLEAN_COL ) + substr_condition.extend( f"cast( {num_col} + {ts_col} as binary(16) )" for ts_col in TS_TYPE_COL if num_col is not FLOAT_COL and num_col is not DOUBLE_COL) + + substr_condition.extend( f"cast( {bool_col} + {ts_col} as binary(16) )" for bool_col in BOOLEAN_COL for ts_col in TS_TYPE_COL ) + + substr_condition.append(''' " test1234!@#$%^&*() :'> 0 " ''') + + return substr_condition + + def __where_condition(self, col): + return "" + + def __group_condition(self, col, having = ""): + return f" group by {col} having {having}" if having else f" group by {col} " + + def __substr_check(self, tbname, pos, lens=None): + substr_condition = self.__substr_condition() + for condition in substr_condition: + where_condition = self.__where_condition(condition) + substr_group_having = self.__group_condition(condition, having=f"{condition} is not null " ) + substr_group_no_having= self.__group_condition(condition) + groups = ["", substr_group_having, substr_group_no_having] + + if pos < 1: + tdSql.error(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ") + continue + + # substr counts from 1, so pos maps to slice start pos-1; a missing lens means "to the end" + end = pos - 1 + lens if lens is not None else None + tdSql.query(f"select substr( {condition}, {pos}, {lens}) , {condition} from {tbname} ") + for j in range(tdSql.queryRows): + tdSql.checkData(j,0, tdSql.getData(j,1)[pos-1:end]) if tdSql.getData(j,1) else tdSql.checkData(j, 0, None) + + [ tdSql.query(f"select substr({condition}, {pos}, {lens}) from {tbname} {where_condition} {group} ") for group in groups ] + + + def __substr_err_check(self,tbname): + sqls = [] + + for num_col in NUM_COL: + sqls.extend( ( f"select substr( {num_col} ) from {tbname} ", f"select substr(ceil( {num_col} )) from {tbname} ", f"select {num_col} from {tbname} group by substr( {num_col} ) ", ) ) + + sqls.extend( f"select substr( {char_col} + {num_col} ) from {tbname} " for char_col in CHAR_COL ) + sqls.extend( f"select substr( {num_col} , {ts_col} ) from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select substr( {num_col} , {bool_col} ) from {tbname} " for bool_col in BOOLEAN_COL ) + + sqls.extend( f"select substr( {ts_col}+{bool_col} ) from {tbname} " for ts_col in TS_TYPE_COL for bool_col in BOOLEAN_COL ) + sqls.extend( f"select substr( {num_col}+{ts_col} ) from {tbname} " for num_col in NUM_COL for ts_col in TS_TYPE_COL) + sqls.extend( f"select substr( {num_col}+ {bool_col} ) from {tbname} " for num_col in NUM_COL for bool_col in BOOLEAN_COL) + sqls.extend( f"select substr( {num_col}+ {num_col_2} ) from {tbname} " for num_col in NUM_COL for num_col_2 in NUM_COL) + sqls.extend( f"select substr( {ts_col}+{ts_col_2} ) from {tbname} " for ts_col in TS_TYPE_COL for ts_col_2 in TS_TYPE_COL ) + sqls.extend( f"select substr( {bool_col}+ {bool_col_2} ) from {tbname} " for bool_col in BOOLEAN_COL for bool_col_2 in BOOLEAN_COL ) + + sqls.extend( f"select substr( {char_col} + {char_col_2} ) from {tbname} " for char_col in CHAR_COL for char_col_2 in CHAR_COL ) + sqls.extend( f"select substr({num_col}, '1') from {tbname} " for num_col in NUM_COL ) + sqls.extend( f"select substr({ts_col}, '1') from {tbname} " for ts_col in TS_TYPE_COL ) + sqls.extend( f"select substr({bool_col}, '1') from {tbname} " for bool_col in BOOLEAN_COL ) + sqls.extend( f"select substr({char_col},'1') from {tbname} interval(2d) sliding(1d)" for char_col in CHAR_COL ) + sqls.extend( ( f"select substr() from {tbname} ", f"select substr(*) from {tbname} ", f"select substr(ccccccc) from {tbname} ", f"select substr(111) from {tbname} ", ) ) + + return sqls +
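+ # note: __substr_check maps substr(expr, pos, len) to the Python slice expr[pos-1 : pos-1+len]; pos < 1 is exercised only as an error case + 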
def __test_current(self): # sourcery skip: use-itertools-product + tdLog.printNoPrefix("==========current sql condition check , must return query ok==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + for tb in tbname: + self.__substr_check(tb, 1, 6) + tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========") + + def __test_error(self): + tdLog.printNoPrefix("==========err sql condition check , must return error==========") + tbname = ["ct1", "ct2", "ct4", "t1", "stb1"] + + for tb in tbname: + for errsql in self.__substr_err_check(tb): + tdSql.error(sql=errsql) + self.__substr_check(tb, 0, 6) + tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========") + + + def all_test(self): + self.__test_current() + self.__test_error() + + + def __create_tb(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + create_stb_sql = f'''create table stb1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) tags (t1 int) + ''' + create_ntb_sql = f'''create table t1( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp + ) + ''' + tdSql.execute(create_stb_sql) + tdSql.execute(create_ntb_sql) + + for i in range(4): + tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') + + def __insert_data(self, rows): + now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) + for i in range(rows): + tdSql.execute( + f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )" + ) + tdSql.execute( + f'''insert into ct1 values + ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } ) + ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } ) + ''' + ) + + tdSql.execute( + f'''insert into ct4 values + ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000} + ) + ( + { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000} + ) + ''' + ) + + tdSql.execute( + f'''insert into ct2 values + ( { now_time - rows * 7776000000 }, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( + { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126, + { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127, + { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + for i in range(rows): + insert_data = f'''insert into t1 values + ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}, + "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } ) + ''' + tdSql.execute(insert_data) + tdSql.execute( + f'''insert into t1 values + ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) + ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127, + { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, + "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 } + ) + ( + { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126, + { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, + "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 } + ) + ''' + ) + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table") + self.__create_tb() + + tdLog.printNoPrefix("==========step2:insert data") + self.rows = 10 + self.__insert_data(self.rows) + + tdLog.printNoPrefix("==========step3:all check") + self.all_test() + + tdDnodes.stop(1) + tdDnodes.start(1) + + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py index 3c3fddfb45b29dbc23ceabacae62717ee1155a52..bb485161dd12885175c470e8b5542b1ab011f186 100644 --- a/tests/system-test/2-query/upper.py +++ b/tests/system-test/2-query/upper.py @@ -59,12 +59,9 @@ class TDTestCase: groups = ["", group_having, group_no_having] for group_condition in groups: - tdSql.query(f"select {condition} from {tbname} {where_condition} {group_condition} ") - datas = [tdSql.getData(i,0) for i in range(tdSql.queryRows)] - upper_data = [ str(data).upper() if data else None for data in datas ] - tdSql.query(f"select upper( {condition} ) from {tbname} {where_condition} {group_condition}") - for i in range(len(upper_data)): - tdSql.checkData(i, 0, upper_data[i] ) if upper_data[i] else tdSql.checkData(i, 0, None) + tdSql.query(f"select upper( {condition} ), {condition} from {tbname} {where_condition} {group_condition}") + for i in range(tdSql.queryRows): + tdSql.checkData(i, 0, str(tdSql.getData(i, 1)).upper() ) if tdSql.getData(i, 1) else tdSql.checkData(i, 0, None) 
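+ # selecting upper(expr) alongside expr verifies each row in place, without the second query the previous version needed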
def __upper_err_check(self,tbname): sqls = [] @@ -229,13 +226,13 @@ class TDTestCase: tdLog.printNoPrefix("==========step3:all check") self.all_test() - # tdDnodes.stop(1) - # tdDnodes.start(1) + tdDnodes.stop(1) + tdDnodes.start(1) - # tdSql.execute("use db") + tdSql.execute("use db") - # tdLog.printNoPrefix("==========step4:after wal, all check again ") - # self.all_test() + tdLog.printNoPrefix("==========step4:after wal, all check again ") + self.all_test() def stop(self): tdSql.close() diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh index cb9d4721169165c78bdd6853de3e1bbf6dad5ab6..f713f707cb8ae954a4371df7cdd800f74e477d39 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -6,11 +6,20 @@ python3 ./test.py -f 0-others/taosShell.py python3 ./test.py -f 0-others/taosShellError.py python3 ./test.py -f 0-others/taosShellNetChk.py python3 ./test.py -f 0-others/telemetry.py - +python3 ./test.py -f 0-others/taosdMonitor.py #python3 ./test.py -f 2-query/between.py python3 ./test.py -f 2-query/distinct.py python3 ./test.py -f 2-query/varchar.py +python3 ./test.py -f 2-query/ltrim.py +python3 ./test.py -f 2-query/rtrim.py +python3 ./test.py -f 2-query/length.py +python3 ./test.py -f 2-query/char_length.py +python3 ./test.py -f 2-query/upper.py +python3 ./test.py -f 2-query/lower.py +python3 ./test.py -f 2-query/join.py +# python3 ./test.py -f 2-query/concat.py # after wal, crash occurred +# python3 ./test.py -f 2-query/concat_ws.py python3 ./test.py -f 2-query/timezone.py python3 ./test.py -f 2-query/Now.py @@ -23,8 +32,7 @@ python3 ./test.py -f 2-query/last.py python3 ./test.py -f 2-query/To_unixtimestamp.py python3 ./test.py -f 2-query/timetruncate.py -python3 ./test.py -f 2-query/Timediff.py -# python3 ./test.py -f 2-query/diff.py + # python3 ./test.py -f 2-query/Timediff.py #python3 ./test.py -f 2-query/cast.py