Commit 9debc281 authored by Shengliang Guan

Merge remote-tracking branch 'origin/3.0' into feature/dnode

@@ -233,7 +233,7 @@ pipeline {
changeRequest()
}
steps {
timeout(time: 20, unit: 'MINUTES'){
timeout(time: 40, unit: 'MINUTES'){
pre_test()
script {
sh '''
@@ -245,8 +245,9 @@ pipeline {
'''
sh '''
cd ${WKC}/tests/parallel_test
export DEFAULT_RETRY_TIME=2
date
time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log
timeout 2100 time ./run.sh -e -m /home/m.json -t /tmp/cases.task -b ${BRANCH_NAME} -l ${WKDIR}/log -o 480
'''
}
}
......
@@ -27,11 +27,13 @@ typedef struct SScalarCtx {
SArray *pBlockList; /* element is SSDataBlock* */
SHashObj *pRes; /* element is SScalarParam */
void *param; // additional parameter (meta actually) for acquire value such as tbname/tags values
SHashObj *udf2Handle;
} SScalarCtx;
#define SCL_DATA_TYPE_DUMMY_HASH 9000
#define SCL_DEFAULT_OP_NUM 10
#define SCL_DEFAULT_UDF_NUM 8
#define SCL_IS_CONST_NODE(_node) ((NULL == (_node)) || (QUERY_NODE_VALUE == (_node)->type) || (QUERY_NODE_NODE_LIST == (_node)->type))
#define SCL_IS_CONST_CALC(_ctx) (NULL == (_ctx)->pBlockList)
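This hunk adds a second map to SScalarCtx: udf2Handle caches one UDF connector handle per user-defined-function name for the lifetime of the context, with the new SCL_DEFAULT_UDF_NUM as its default capacity. Below is a minimal, self-contained sketch in plain C of such a name-keyed handle cache; the type and helper names are hypothetical stand-ins, and the real code keeps the cache in an SHashObj keyed by the function-name string rather than in a fixed array.

#include <stdio.h>
#include <string.h>

typedef void *UdfcFuncHandle;            /* opaque connector handle */

#define SCL_DEFAULT_UDF_NUM 8            /* matches the new default capacity */

/* Hypothetical name -> handle cache standing in for ctx->udf2Handle. */
typedef struct UdfHandleCache {
  char           name[SCL_DEFAULT_UDF_NUM][64];
  UdfcFuncHandle handle[SCL_DEFAULT_UDF_NUM];
  int            used;
} UdfHandleCache;

/* Look up a cached handle by UDF name; NULL means "not set up yet". */
UdfcFuncHandle udfCacheGet(const UdfHandleCache *c, const char *udfName) {
  for (int i = 0; i < c->used; ++i) {
    if (strcmp(c->name[i], udfName) == 0) return c->handle[i];
  }
  return NULL;
}

/* Remember a freshly set-up handle under its UDF name. */
void udfCachePut(UdfHandleCache *c, const char *udfName, UdfcFuncHandle h) {
  if (c->used < SCL_DEFAULT_UDF_NUM) {
    strncpy(c->name[c->used], udfName, sizeof(c->name[0]) - 1);
    c->name[c->used][sizeof(c->name[0]) - 1] = '\0';
    c->handle[c->used] = h;
    c->used++;
  }
}

int main(void) {
  UdfHandleCache cache = {0};
  int fakeHandle = 42;
  udfCachePut(&cache, "my_udf", &fakeHandle);
  printf("cached: %s\n", udfCacheGet(&cache, "my_udf") ? "yes" : "no");
  return 0;
}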
......
@@ -153,6 +153,18 @@ void sclFreeRes(SHashObj *res) {
taosHashCleanup(res);
}
void sclFreeUdfHandles(SHashObj *udf2handle) {
void *pIter = taosHashIterate(udf2handle, NULL);
while (pIter) {
UdfcFuncHandle *handle = (UdfcFuncHandle *)pIter;
if (handle) {
teardownUdf(*handle);
}
pIter = taosHashIterate(udf2handle, pIter);
}
taosHashCleanup(udf2handle);
}
void sclFreeParam(SScalarParam *param) {
if (param->columnData != NULL) {
colDataDestroy(param->columnData);
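The sclFreeUdfHandles helper added above iterates the cache with taosHashIterate, calls teardownUdf exactly once for every cached handle, and then releases the map with taosHashCleanup. A rough standalone equivalent of that cleanup loop, with hypothetical names and a plain array in place of the hash map:

#include <stdio.h>
#include <stdlib.h>

typedef void *UdfcFuncHandle;

/* Stand-in for teardownUdf(): releases one connector handle. */
void teardownUdfSketch(UdfcFuncHandle h) {
  printf("teardown handle %p\n", h);
}

/* Mirrors the shape of sclFreeUdfHandles: tear down each cached handle
 * once, then release the container (as taosHashCleanup does for the map). */
void freeUdfHandlesSketch(UdfcFuncHandle *handles, int n) {
  for (int i = 0; i < n; ++i) {
    if (handles[i] != NULL) teardownUdfSketch(handles[i]);
  }
  free(handles);
}

int main(void) {
  UdfcFuncHandle *handles = calloc(2, sizeof(UdfcFuncHandle));
  if (handles == NULL) return 1;
  static int a, b;
  handles[0] = &a;
  handles[1] = &b;
  freeUdfHandlesSketch(handles, 2);
  return 0;
}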
@@ -362,18 +374,24 @@ int32_t sclExecFunction(SFunctionNode *node, SScalarCtx *ctx, SScalarParam *outp
if (fmIsUserDefinedFunc(node->funcId)) {
UdfcFuncHandle udfHandle = NULL;
code = setupUdf(node->functionName, &udfHandle);
if (code != 0) {
sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", node->functionName, code);
goto _return;
char* udfName = node->functionName;
if (ctx->udf2Handle) {
UdfcFuncHandle *pHandle = taosHashGet(ctx->udf2Handle, udfName, strlen(udfName));
if (pHandle) {
udfHandle = *pHandle;
}
}
code = callUdfScalarFunc(udfHandle, params, paramNum, output);
if (code != 0) {
sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
goto _return;
if (udfHandle == NULL) {
code = setupUdf(udfName, &udfHandle);
if (code != 0) {
sclError("fmExecFunction error. setupUdf. function name: %s, code:%d", udfName, code);
goto _return;
}
if (ctx->udf2Handle) {
taosHashPut(ctx->udf2Handle, udfName, strlen(udfName), &udfHandle, sizeof(UdfcFuncHandle));
}
}
code = teardownUdf(udfHandle);
code = callUdfScalarFunc(udfHandle, params, paramNum, output);
if (code != 0) {
sclError("fmExecFunction error. callUdfScalarFunc. function name: %s, udf code:%d", node->functionName, code);
goto _return;
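The rewritten UDF branch of sclExecFunction now looks the function name up in ctx->udf2Handle first; only on a cache miss does it call setupUdf and store the fresh handle back into the cache, and the per-call teardownUdf drops out because cached handles are torn down once in sclFreeUdfHandles. The self-contained sketch below walks through that lookup-or-setup-then-call flow; every name in it is a hypothetical stand-in, and the one-entry cache is a deliberately tiny substitute for the real hash map.

#include <stdio.h>
#include <string.h>

typedef void *UdfcFuncHandle;

/* Hypothetical stand-ins for the UDF connector API. */
int setupUdfSketch(const char *udfName, UdfcFuncHandle *pHandle) {
  static int connectorSlot;            /* dummy storage, just for the sketch */
  printf("setupUdf(%s)\n", udfName);
  *pHandle = &connectorSlot;
  return 0;
}

int callUdfSketch(UdfcFuncHandle handle, const char *udfName) {
  printf("call %s via %p\n", udfName, handle);
  return 0;
}

/* Hypothetical one-entry cache standing in for ctx->udf2Handle. */
typedef struct {
  char           name[64];
  UdfcFuncHandle handle;
} UdfCacheEntry;

UdfcFuncHandle cacheGet(const UdfCacheEntry *e, const char *udfName) {
  return (e->handle != NULL && strcmp(e->name, udfName) == 0) ? e->handle : NULL;
}

void cachePut(UdfCacheEntry *e, const char *udfName, UdfcFuncHandle handle) {
  strncpy(e->name, udfName, sizeof(e->name) - 1);
  e->name[sizeof(e->name) - 1] = '\0';
  e->handle = handle;
}

/* Lookup-or-setup-then-call, mirroring the new shape of the UDF branch. */
int execUdfSketch(UdfCacheEntry *cache, const char *udfName) {
  UdfcFuncHandle udfHandle = cacheGet(cache, udfName);   /* 1. try the cache        */
  if (udfHandle == NULL) {
    int code = setupUdfSketch(udfName, &udfHandle);      /* 2. cache miss: set up   */
    if (code != 0) return code;
    cachePut(cache, udfName, udfHandle);                 /* 3. remember the handle  */
  }
  return callUdfSketch(udfHandle, udfName);              /* 4. call; no per-call teardown */
}

int main(void) {
  UdfCacheEntry cache = {"", NULL};
  execUdfSketch(&cache, "my_udf");   /* first call: setup + call  */
  execUdfSketch(&cache, "my_udf");   /* second call: cache hit    */
  return 0;
}

Running the sketch sets up "my_udf" once and calls it twice, which is the saving the cache is meant to provide when the same UDF appears repeatedly within one scalar calculation.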
@@ -891,15 +909,20 @@ int32_t scalarCalculateConstants(SNode *pNode, SNode **pRes) {
SScalarCtx ctx = {0};
ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
if (NULL == ctx.pRes) {
sclError("taosHashInit failed, num:%d", SCL_DEFAULT_OP_NUM);
sclError("taosHashInit result map failed, num:%d", SCL_DEFAULT_OP_NUM);
SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ctx.udf2Handle = taosHashInit(SCL_DEFAULT_UDF_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == ctx.udf2Handle) {
sclError("taosHashInit udf to handle map failed, num:%d", SCL_DEFAULT_OP_NUM);
SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
nodesRewriteExprPostOrder(&pNode, sclConstantsRewriter, (void *)&ctx);
SCL_ERR_JRET(ctx.code);
*pRes = pNode;
_return:
sclFreeUdfHandles(ctx.udf2Handle);
sclFreeRes(ctx.pRes);
return code;
}
@@ -915,10 +938,14 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
// TODO: OPT performance
ctx.pRes = taosHashInit(SCL_DEFAULT_OP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
if (NULL == ctx.pRes) {
sclError("taosHashInit failed, num:%d", SCL_DEFAULT_OP_NUM);
sclError("taosHashInit result map failed, num:%d", SCL_DEFAULT_OP_NUM);
SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ctx.udf2Handle = taosHashInit(SCL_DEFAULT_UDF_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
if (NULL == ctx.udf2Handle) {
sclError("taosHashInit udf to handle map failed, num:%d", SCL_DEFAULT_OP_NUM);
SCL_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
nodesWalkExprPostOrder(pNode, sclCalcWalker, (void *)&ctx);
SCL_ERR_JRET(ctx.code);
@@ -936,6 +963,7 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
_return:
//nodesDestroyNode(pNode);
sclFreeUdfHandles(ctx.udf2Handle);
sclFreeRes(ctx.pRes);
return code;
}
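Both scalarCalculateConstants and scalarCalculate now create the udf2Handle map right next to the result map, return early if either allocation fails, and release both maps (tearing down any cached handles) on the shared _return path, so cached UDF handles live only for the duration of a single call into the scalar module. A loose, self-contained sketch of that init / compute / cleanup-on-one-exit-path shape, with hypothetical names and plain allocations standing in for taosHashInit:

#include <stdlib.h>

/* Hypothetical context owning the two maps, modelled as plain allocations. */
typedef struct {
  void *pRes;        /* node -> result map                */
  void *udf2Handle;  /* UDF name -> connector handle map  */
} CtxSketch;

int scalarCalculateSketch(void) {
  int       code = 0;
  CtxSketch ctx  = {0};

  ctx.pRes = malloc(64);                   /* stands in for taosHashInit(...)   */
  if (ctx.pRes == NULL) return -1;         /* nothing allocated yet, bail out   */

  ctx.udf2Handle = malloc(64);
  if (ctx.udf2Handle == NULL) {
    code = -1;
    goto _return;
  }

  /* ... walk the expression tree here; UDF handles get cached in udf2Handle ... */

_return:
  free(ctx.udf2Handle);                    /* stands in for sclFreeUdfHandles() */
  free(ctx.pRes);                          /* stands in for sclFreeRes()        */
  return code;
}

int main(void) {
  return scalarCalculateSketch();
}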
Subproject commit 0ae9f872c26d5da8cb61aa9eb00b5c7aeba10ec4
Subproject commit 0aad27d725f4ee6b18daf1db0c07d933aed16eea