taosdata / TDengine
Commit 86031c19
Authored May 13, 2022 by Haojun Liao
fix(query): add the repeat scan flag check during aggregate executor.
Parent: 38d52c69
Showing 5 changed files with 87 additions and 55 deletions (+87 −55).
include/libs/function/function.h         +1   −1
source/libs/executor/src/executorimpl.c  +25  −24
source/libs/executor/src/scanoperator.c  +48  −17
source/libs/function/src/builtinsimpl.c  +1   −1
source/libs/function/src/taggfunction.c  +12  −12
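The headline change is narrow: SqlFunctionCtx gains a scanFlag field (replacing currentStage), and functionNeedToExecute() consults it so that, during the repeat scan of a data block, only functions that genuinely need a second pass (per fmIsRepeatScanFunc) run again. Below is a minimal standalone sketch of that gate, using simplified stand-in types and illustrative values rather than the real TDengine headers: DemoFunctionCtx, needsRepeatScan, demoFunctionNeedToExecute, and the numeric stage values are hypothetical; scanFlag, REPEAT_SCAN, and fmIsRepeatScanFunc are the names used by the diff.

```c
/* Standalone illustration of the commit's core idea: the executor records the
 * current scan stage in scanFlag (replacing currentStage), and the
 * need-to-execute check skips every function that does not need the repeat
 * scan.  Names and values here are simplified stand-ins, not the real headers. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { MAIN_SCAN = 0, REPEAT_SCAN = 1 } EDemoScanStage;  /* illustrative values */

typedef struct {
  int32_t functionId;
  uint8_t scanFlag;         /* set by the executor before each data block */
  bool    needsRepeatScan;  /* stand-in for fmIsRepeatScanFunc(functionId) */
} DemoFunctionCtx;

/* Mirrors the new check added to functionNeedToExecute(): during the repeat
 * scan, only repeat-scan functions (e.g. percentile) are executed again. */
bool demoFunctionNeedToExecute(const DemoFunctionCtx *pCtx) {
  if (pCtx->scanFlag == REPEAT_SCAN) {
    return pCtx->needsRepeatScan;
  }
  return true;  /* on the main scan every function runs */
}

int main(void) {
  DemoFunctionCtx count = {.functionId = 1, .scanFlag = REPEAT_SCAN, .needsRepeatScan = false};
  DemoFunctionCtx pct   = {.functionId = 2, .scanFlag = REPEAT_SCAN, .needsRepeatScan = true};
  printf("count runs on repeat scan: %d\n", demoFunctionNeedToExecute(&count));   /* 0 */
  printf("percentile runs on repeat scan: %d\n", demoFunctionNeedToExecute(&pct)); /* 1 */
  return 0;
}
```

The rest of the commit is the mechanical follow-through: every reader and writer of the old currentStage field is moved to scanFlag, and the aggregate operator's error handling is tidied along the way.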
include/libs/function/function.h

```diff
@@ -173,6 +173,7 @@ typedef struct SqlFunctionCtx {
   SInputColumnInfoData input;
   SResultDataInfo      resDataInfo;
   uint32_t             order;     // data block scanner order: asc|desc
+  uint8_t              scanFlag;  // record current running step, default: 0
   ////////////////////////////////////////////////////////////////
   int32_t              startRow;  // start row index
   int32_t              size;      // handled processed row number
@@ -183,7 +184,6 @@ typedef struct SqlFunctionCtx {
   bool                 hasNull;       // null value exist in current block, TODO remove it
   bool                 requireNull;   // require null in some function, TODO remove it
   int32_t              columnIndex;   // TODO remove it
-  uint8_t              currentStage;  // record current running step, default: 0
   bool                 isAggSet;
   int64_t              startTs;       // timestamp range of current query when function is executed on a specific data block, TODO remove it
   bool                 stableQuery;
```
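The struct change only pays off if every writer of the old currentStage field now writes scanFlag; the executor-side half of that is the per-block stamping done in doSetInputDataBlock() in the next file. A compile-only sketch of that stamping pattern with stand-in types (DemoCtx, DemoBlockInfo, and demoSetInputDataBlock are hypothetical names, not the executor's API):

```c
/* Simplified stand-ins for the per-block setup the executor performs in
 * doSetInputDataBlock(): each function context is stamped with the block's
 * row count and the current scan stage before the functions run. */
#include <stddef.h>
#include <stdint.h>

typedef struct {
  int32_t rows;
} DemoBlockInfo;

typedef struct {
  uint32_t order;     /* scan order of the current block */
  uint8_t  scanFlag;  /* replaces the old currentStage field */
  int32_t  size;      /* rows to process from the block */
} DemoCtx;

void demoSetInputDataBlock(DemoCtx *pCtx, size_t numOfExprs, const DemoBlockInfo *pInfo,
                           uint32_t order, uint8_t scanFlag) {
  for (size_t i = 0; i < numOfExprs; ++i) {
    pCtx[i].order    = order;
    pCtx[i].size     = pInfo->rows;
    pCtx[i].scanFlag = scanFlag;  /* previously: pCtx[i].currentStage = scanFlag */
  }
}

int main(void) {
  DemoCtx       ctx[2] = {{0}};
  DemoBlockInfo block  = {.rows = 1024};
  demoSetInputDataBlock(ctx, 2, &block, /*order*/ 1, /*scanFlag*/ 0);
  return (ctx[1].size == 1024) ? 0 : 1;
}
```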
source/libs/executor/src/executorimpl.c

```diff
@@ -746,7 +746,7 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
     pCtx[i].order = order;
     pCtx[i].size = pBlock->info.rows;
     pCtx[i].pSrcBlock = pBlock;
-    pCtx[i].currentStage = scanFlag;
+    pCtx[i].scanFlag = scanFlag;
 
     SInputColumnInfoData* pInput = &pCtx[i].input;
     pInput->uid = pBlock->info.uid;
@@ -826,23 +826,22 @@ static int32_t doSetInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCt
   return code;
 }
 
-static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunctionCtx* pCtx) {
+static int32_t doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SqlFunctionCtx* pCtx) {
   for (int32_t k = 0; k < pOperator->numOfExprs; ++k) {
     if (functionNeedToExecute(&pCtx[k])) {
       pCtx[k].startTs = startTs;  // this can be set during create the struct
 
       // todo add a dummy funtion to avoid process check
       if (pCtx[k].fpSet.process != NULL) {
         int32_t code = pCtx[k].fpSet.process(&pCtx[k]);
         if (code != TSDB_CODE_SUCCESS) {
-          qError("%s call aggregate function error happens, code : %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
-          pOperator->pTaskInfo->code = code;
-          longjmp(pOperator->pTaskInfo->env, code);
+          qError("%s aggregate function error happens, code: %s", GET_TASKID(pOperator->pTaskInfo), tstrerror(code));
+          return code;
         }
       }
     }
   }
+
+  return TSDB_CODE_SUCCESS;
 }
 
 static void setPseudoOutputColInfo(SSDataBlock* pResult, SqlFunctionCtx* pCtx, SArray* pPseudoList) {
@@ -998,18 +997,22 @@ static bool functionNeedToExecute(SqlFunctionCtx* pCtx) {
     return false;
   }
 
+  if (pCtx->scanFlag == REPEAT_SCAN) {
+    return fmIsRepeatScanFunc(pCtx->functionId);
+  }
+
   if (isRowEntryCompleted(pResInfo)) {
     return false;
   }
 
-  if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_FIRST) {
-    // return QUERY_IS_ASC_QUERY(pQueryAttr);
-  }
-
-  // denote the order type
-  if ((functionId == FUNCTION_LAST_DST || functionId == FUNCTION_LAST)) {
-    // return pCtx->param[0].i == pQueryAttr->order.order;
-  }
+  //  if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_FIRST) {
+  //    // return QUERY_IS_ASC_QUERY(pQueryAttr);
+  //  }
+  //
+  //  // denote the order type
+  //  if ((functionId == FUNCTION_LAST_DST || functionId == FUNCTION_LAST)) {
+  //    // return pCtx->param[0].i == pQueryAttr->order.order;
+  //  }
 
   // in the reverse table scan, only the following functions need to be executed
   //  if (IS_REVERSE_SCAN(pRuntimeEnv) ||
@@ -1944,7 +1947,7 @@ void setFunctionResultOutput(SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t
     cleanupResultRowEntry(pEntry);
 
     pCtx[i].resultInfo = pEntry;
-    pCtx[i].currentStage = stage;
+    pCtx[i].scanFlag = stage;
 
     // set the timestamp output buffer for top/bottom/diff query
     //    int32_t fid = pCtx[i].functionId;
@@ -3724,7 +3727,6 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
   SAggOperatorInfo* pAggInfo = pOperator->info;
   SOptrBasicInfo*   pInfo = &pAggInfo->binfo;
-
   SOperatorInfo* downstream = pOperator->pDownstream[0];
 
   int32_t order = TSDB_ORDER_ASC;
@@ -3738,9 +3740,6 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
     if (pBlock == NULL) {
      break;
     }
 
-    //    if (pAggInfo->current != NULL) {
-    //      setTagValue(pOperator, pAggInfo->current->pTable, pInfo->pCtx, pOperator->numOfExprs);
-    //    }
     int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
     if (code != TSDB_CODE_SUCCESS) {
@@ -3752,15 +3751,17 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
       code = projectApplyFunctions(pAggInfo->pScalarExprInfo, pBlock, pBlock, pAggInfo->pScalarCtx, pAggInfo->numOfScalarExpr, NULL);
       if (code != TSDB_CODE_SUCCESS) {
         pTaskInfo->code = code;
-        longjmp(pTaskInfo->env, pTaskInfo->code);
+        longjmp(pTaskInfo->env, code);
       }
     }
 
     // the pDataBlock are always the same one, no need to call this again
     setExecutionContext(pOperator->numOfExprs, pBlock->info.groupId, pTaskInfo, pAggInfo);
     setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order, scanFlag, true);
-    doAggregateImpl(pOperator, 0, pInfo->pCtx);
+    code = doAggregateImpl(pOperator, 0, pInfo->pCtx);
+    if (code != 0) {
+      longjmp(pTaskInfo->env, code);
+    }
 
 #if 0  // test for encode/decode result info
     if(pOperator->encodeResultRow){
```
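Besides the rename, this file changes how aggregate failures unwind: doAggregateImpl() now returns an error code instead of calling longjmp() deep inside its loop, and the single call site in doOpenAggregateOptr() decides whether to jump back to the task's recovery point. A small self-contained sketch of that pattern under generic names (taskEnv, applyOneFunction, and aggregateImpl are illustrative, not the executor's symbols):

```c
/* Sketch of the control-flow change: the inner helper returns an error code,
 * and only the outer driver longjmps back to the task's recovery point. */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf taskEnv;  /* stands in for pTaskInfo->env */

static int applyOneFunction(int id) {
  return (id == 2) ? -1 : 0;  /* pretend function 2 fails */
}

/* like the new doAggregateImpl(): stop at the first error and return it */
static int aggregateImpl(int numOfExprs) {
  for (int k = 0; k < numOfExprs; ++k) {
    int code = applyOneFunction(k);
    if (code != 0) {
      fprintf(stderr, "aggregate function error happens, code: %d\n", code);
      return code;
    }
  }
  return 0;
}

int main(void) {
  if (setjmp(taskEnv) != 0) {  /* recovery point, as the query task sets up */
    puts("task aborted");
    return 1;
  }
  int code = aggregateImpl(4);
  if (code != 0) {
    longjmp(taskEnv, code);    /* the caller unwinds, not the helper */
  }
  puts("task finished");
  return 0;
}
```

Returning the code keeps the longjmp confined to the operator driver, which already owns the task environment and its error field.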
source/libs/executor/src/scanoperator.c

```diff
@@ -260,6 +260,53 @@ static void prepareForDescendingScan(STableScanInfo* pTableScanInfo, SqlFunction
   pTableScanInfo->cond.order = TSDB_ORDER_DESC;
 }
 
+static void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock) {
+  // currently only the tbname pseudo column
+  if (pTableScanInfo->numOfPseudoExpr == 0) {
+    return;
+  }
+
+  SMetaReader mr = {0};
+  metaReaderInit(&mr, pTableScanInfo->readHandle.meta, 0);
+  metaGetTableEntryByUid(&mr, pBlock->info.uid);
+
+  for (int32_t j = 0; j < pTableScanInfo->numOfPseudoExpr; ++j) {
+    SExprInfo* pExpr = &pTableScanInfo->pPseudoExpr[j];
+
+    int32_t dstSlotId = pExpr->base.resSchema.slotId;
+
+    SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+    colInfoDataEnsureCapacity(pColInfoData, 0, pBlock->info.rows);
+
+    int32_t functionId = pExpr->pExpr->_function.functionId;
+
+    // this is to handle the tbname
+    if (fmIsScanPseudoColumnFunc(functionId)) {
+      struct SScalarFuncExecFuncs fpSet = {0};
+      fmGetScalarFuncExecFuncs(functionId, &fpSet);
+
+      SColumnInfoData infoData = {0};
+      infoData.info.type = TSDB_DATA_TYPE_BIGINT;
+      infoData.info.bytes = sizeof(uint64_t);
+      colInfoDataEnsureCapacity(&infoData, 0, 1);
+
+      colDataAppendInt64(&infoData, 0, &pBlock->info.uid);
+      SScalarParam srcParam = {.numOfRows = pBlock->info.rows, .param = pTableScanInfo->readHandle.meta, .columnData = &infoData};
+
+      SScalarParam param = {.columnData = pColInfoData};
+      fpSet.process(&srcParam, 1, &param);
+    } else {  // these are tags
+      const char* p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
+      for (int32_t i = 0; i < pBlock->info.rows; ++i) {
+        colDataAppend(pColInfoData, i, p, (p == NULL));
+      }
+    }
+  }
+
+  metaReaderClear(&mr);
+}
+
 static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
   STableScanInfo* pTableScanInfo = pOperator->info;
   SSDataBlock*    pBlock = pTableScanInfo->pResBlock;
@@ -285,23 +332,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
 
     // currently only the tbname pseudo column
     if (pTableScanInfo->numOfPseudoExpr > 0) {
-      int32_t dstSlotId = pTableScanInfo->pPseudoExpr->base.resSchema.slotId;
-      SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotId);
-      colInfoDataEnsureCapacity(pColInfoData, 0, pBlock->info.rows);
-
-      struct SScalarFuncExecFuncs fpSet;
-      fmGetScalarFuncExecFuncs(pTableScanInfo->pPseudoExpr->pExpr->_function.functionId, &fpSet);
-
-      SColumnInfoData infoData = {0};
-      infoData.info.type = TSDB_DATA_TYPE_BIGINT;
-      infoData.info.bytes = sizeof(uint64_t);
-      colInfoDataEnsureCapacity(&infoData, 0, 1);
-
-      colDataAppendInt64(&infoData, 0, &pBlock->info.uid);
-      SScalarParam srcParam = {.numOfRows = pBlock->info.rows, .param = pTableScanInfo->readHandle.meta, .columnData = &infoData};
-
-      SScalarParam param = {.columnData = pColInfoData};
-      fpSet.process(&srcParam, 1, &param);
+      addTagPseudoColumnData(pTableScanInfo, pBlock);
     }
 
     return pBlock;
```
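The scan-operator change lifts the inline tbname handling into addTagPseudoColumnData(), which walks every pseudo expression and fills the destination column for all rows of the block, whether the value comes from the tbname function or from an ordinary tag. A condensed sketch of that per-row fan-out with plain C stand-ins (DemoColumn and fillPseudoColumn are hypothetical; the real code goes through colDataAppend and the scalar-function dispatch shown above):

```c
/* Condensed version of what addTagPseudoColumnData() does per pseudo column:
 * a single per-table value (the table uid/name or a tag value) is repeated
 * for every row of the data block, so the column lines up with the scanned rows. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { DEMO_ROWS = 4 };

typedef struct {
  int64_t values[DEMO_ROWS];
  bool    isNull[DEMO_ROWS];
} DemoColumn;

/* fill one destination column with a single per-table value */
void fillPseudoColumn(DemoColumn *pCol, int32_t rows, const int64_t *pVal) {
  for (int32_t i = 0; i < rows; ++i) {
    if (pVal == NULL) {
      pCol->isNull[i] = true;   /* tag missing: append NULL, like colDataAppend(..., p == NULL) */
    } else {
      pCol->values[i] = *pVal;
      pCol->isNull[i] = false;
    }
  }
}

int main(void) {
  DemoColumn tbnameCol = {0};
  int64_t    uid = 42;  /* stand-in for pBlock->info.uid feeding the tbname function */
  fillPseudoColumn(&tbnameCol, DEMO_ROWS, &uid);
  for (int i = 0; i < DEMO_ROWS; ++i) {
    printf("row %d -> %lld\n", i, (long long)tbnameCol.values[i]);
  }
  return 0;
}
```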
source/libs/function/src/builtinsimpl.c

```diff
@@ -1645,7 +1645,7 @@ int32_t percentileFunction(SqlFunctionCtx* pCtx) {
   int32_t type = pCol->info.type;
 
   SPercentileInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);
-  if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
+  if (pCtx->scanFlag == REPEAT_SCAN && pInfo->stage == 0) {
     pInfo->stage += 1;
 
     // all data are null, set it completed
```
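percentileFunction() keys its second pass off the new flag: the first time it sees scanFlag == REPEAT_SCAN it bumps its internal stage and switches from range collection to the actual accumulation. A minimal sketch of that staged, two-pass shape (DemoPercentileInfo and the min/max bookkeeping are illustrative; the real function accumulates into its own intermediate-buffer structure):

```c
/* Two-pass pattern mirrored from percentileFunction(): pass 0 (main scan)
 * only records the value range; when scanFlag flips to REPEAT_SCAN the
 * function bumps its stage once and the second pass does the real work. */
#include <stdint.h>
#include <stdio.h>

enum { DEMO_MAIN_SCAN = 0, DEMO_REPEAT_SCAN = 1 };  /* illustrative stage values */

typedef struct {
  int32_t stage;    /* 0: collecting the range, 1: second pass */
  double  minVal;
  double  maxVal;
  int64_t counted;  /* stand-in for whatever the second pass accumulates */
} DemoPercentileInfo;

void demoPercentileFunction(DemoPercentileInfo *pInfo, uint8_t scanFlag,
                            const double *vals, int32_t rows) {
  if (scanFlag == DEMO_REPEAT_SCAN && pInfo->stage == 0) {
    pInfo->stage += 1;  /* same gate as the patched code */
  }

  for (int32_t i = 0; i < rows; ++i) {
    if (pInfo->stage == 0) {  /* first pass: just track the range */
      if (vals[i] < pInfo->minVal) pInfo->minVal = vals[i];
      if (vals[i] > pInfo->maxVal) pInfo->maxVal = vals[i];
    } else {                  /* repeat scan: do the real accumulation */
      pInfo->counted++;
    }
  }
}

int main(void) {
  double             data[] = {3.0, 1.0, 4.0, 1.5};
  DemoPercentileInfo info   = {.stage = 0, .minVal = 1e300, .maxVal = -1e300, .counted = 0};
  demoPercentileFunction(&info, DEMO_MAIN_SCAN, data, 4);    /* first pass */
  demoPercentileFunction(&info, DEMO_REPEAT_SCAN, data, 4);  /* second pass over the same data */
  printf("range [%g, %g], second-pass rows %lld\n", info.minVal, info.maxVal, (long long)info.counted);
  return 0;
}
```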
source/libs/function/src/taggfunction.c

```diff
@@ -37,7 +37,7 @@
 #define GET_TRUE_DATA_TYPE()                            \
   int32_t type = 0;                                     \
-  if (pCtx->currentStage == MERGE_STAGE) {              \
+  if (pCtx->scanFlag == MERGE_STAGE) {                  \
     type = pCtx->resDataInfo.type;                      \
     assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);   \
   } else {                                              \
@@ -908,7 +908,7 @@ static void avg_func_merge(SqlFunctionCtx *pCtx) {
 static void avg_finalizer(SqlFunctionCtx *pCtx) {
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
 
-  if (pCtx->currentStage == MERGE_STAGE) {
+  if (pCtx->scanFlag == MERGE_STAGE) {
     assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
 
     if (GET_INT64_VAL(GET_ROWCELL_INTERBUF(pResInfo)) <= 0) {
@@ -1152,7 +1152,7 @@ static void stddev_function(SqlFunctionCtx *pCtx) {
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
   SStddevInfo         *pStd = GET_ROWCELL_INTERBUF(pResInfo);
 
-  if (pCtx->currentStage == REPEAT_SCAN && pStd->stage == 0) {
+  if (pCtx->scanFlag == REPEAT_SCAN && pStd->stage == 0) {
     pStd->stage++;
     avg_finalizer(pCtx);
@@ -1814,7 +1814,7 @@ static STopBotInfo *getTopBotOutputInfo(SqlFunctionCtx *pCtx) {
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
 
   // only the first_stage_merge is directly written data into final output buffer
-  if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) {
+  if (pCtx->stableQuery && pCtx->scanFlag != MERGE_STAGE) {
     return (STopBotInfo*) pCtx->pOutput;
   } else {  // during normal table query and super table at the secondary_stage, result is written to intermediate buffer
     return GET_ROWCELL_INTERBUF(pResInfo);
@@ -1956,7 +1956,7 @@ static void top_func_merge(SqlFunctionCtx *pCtx) {
   for (int32_t i = 0; i < pInput->num; ++i) {
     int16_t type = (pCtx->resDataInfo.type == TSDB_DATA_TYPE_FLOAT) ? TSDB_DATA_TYPE_DOUBLE : pCtx->resDataInfo.type;
 //    do_top_function_add(pOutput, (int32_t)pCtx->param[0].param.i, &pInput->res[i]->v.i, pInput->res[i]->timestamp,
-//                        type, &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->currentStage);
+//                        type, &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->scanFlag);
   }
 
   SET_VAL(pCtx, pInput->num, pOutput->num);
@@ -2013,7 +2013,7 @@ static void bottom_func_merge(SqlFunctionCtx *pCtx) {
   for (int32_t i = 0; i < pInput->num; ++i) {
     int16_t type = (pCtx->resDataInfo.type == TSDB_DATA_TYPE_FLOAT) ? TSDB_DATA_TYPE_DOUBLE : pCtx->resDataInfo.type;
 //    do_bottom_function_add(pOutput, (int32_t)pCtx->param[0].param.i, &pInput->res[i]->v.i, pInput->res[i]->timestamp, type,
-//                           &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->currentStage);
+//                           &pCtx->tagInfo, pInput->res[i]->pTags, pCtx->scanFlag);
   }
 
   SET_VAL(pCtx, pInput->num, pOutput->num);
@@ -2073,7 +2073,7 @@ static void percentile_function(SqlFunctionCtx *pCtx) {
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
   SPercentileInfo     *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
 
-  if (pCtx->currentStage == REPEAT_SCAN && pInfo->stage == 0) {
+  if (pCtx->scanFlag == REPEAT_SCAN && pInfo->stage == 0) {
     pInfo->stage += 1;
 
     // all data are null, set it completed
@@ -2180,7 +2180,7 @@ static SAPercentileInfo *getAPerctInfo(SqlFunctionCtx *pCtx) {
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
   SAPercentileInfo    *pInfo = NULL;
 
-  if (pCtx->stableQuery && pCtx->currentStage != MERGE_STAGE) {
+  if (pCtx->stableQuery && pCtx->scanFlag != MERGE_STAGE) {
     pInfo = (SAPercentileInfo*) pCtx->pOutput;
   } else {
     pInfo = GET_ROWCELL_INTERBUF(pResInfo);
@@ -2270,7 +2270,7 @@ static void apercentile_finalizer(SqlFunctionCtx *pCtx) {
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
   SAPercentileInfo    *pOutput = GET_ROWCELL_INTERBUF(pResInfo);
 
-  if (pCtx->currentStage == MERGE_STAGE) {
+  if (pCtx->scanFlag == MERGE_STAGE) {
     //    if (pResInfo->hasResult == DATA_SET_FLAG) {  // check for null
     //      assert(pOutput->pHisto->numOfElems > 0);
     //
@@ -2510,7 +2510,7 @@ static void copy_function(SqlFunctionCtx *pCtx);
 static void tag_function(SqlFunctionCtx *pCtx) {
   SET_VAL(pCtx, 1, 1);
-  if (pCtx->currentStage == MERGE_STAGE) {
+  if (pCtx->scanFlag == MERGE_STAGE) {
     copy_function(pCtx);
   } else {
     taosVariantDump(&pCtx->tag, pCtx->pOutput, pCtx->resDataInfo.type, true);
@@ -2966,7 +2966,7 @@ static bool spread_function_setup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pRe
   SSpreadInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo);
 
   // this is the server-side setup function in client-side, the secondary merge do not need this procedure
-  if (pCtx->currentStage == MERGE_STAGE) {
+  if (pCtx->scanFlag == MERGE_STAGE) {
     //    pCtx->param[0].param.d = DBL_MAX;
     //    pCtx->param[3].param.d = -DBL_MAX;
   } else {
@@ -3086,7 +3086,7 @@ void spread_function_finalizer(SqlFunctionCtx *pCtx) {
    */
   SResultRowEntryInfo *pResInfo = GET_RES_INFO(pCtx);
 
-  if (pCtx->currentStage == MERGE_STAGE) {
+  if (pCtx->scanFlag == MERGE_STAGE) {
     assert(pCtx->inputType == TSDB_DATA_TYPE_BINARY);
 
     //    if (pResInfo->hasResult != DATA_SET_FLAG) {
```
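Every hunk in taggfunction.c is the same mechanical substitution: branches on pCtx->currentStage become branches on pCtx->scanFlag, most of them deciding whether the function is running in the MERGE_STAGE of a two-level (super-table) aggregation, where the input is a packed intermediate result rather than raw rows. A small sketch of that split under simplified types (DemoAvgInter, demoAvgFinalize, and the stage values are hypothetical; the real merge-stage code asserts the input is TSDB_DATA_TYPE_BINARY):

```c
/* Sketch of the MERGE_STAGE branch that the rename touches: a finalizer either
 * merges intermediate (already partially aggregated) results or finishes a
 * plain per-table aggregation, depending on the scan flag. */
#include <stdint.h>
#include <stdio.h>

enum { DEMO_NORMAL_STAGE = 0, DEMO_MERGE_STAGE = 2 };  /* illustrative values */

typedef struct {
  double  sum;
  int64_t num;
} DemoAvgInter;  /* intermediate buffer shipped between aggregation stages */

double demoAvgFinalize(uint8_t scanFlag, const DemoAvgInter *interBuf,
                       double localSum, int64_t localNum) {
  if (scanFlag == DEMO_MERGE_STAGE) {
    /* merge stage: the "input" is the packed intermediate result */
    return (interBuf->num > 0) ? interBuf->sum / (double)interBuf->num : 0.0;
  }
  /* normal query / non-merge stage: finish from locally accumulated rows */
  return (localNum > 0) ? localSum / (double)localNum : 0.0;
}

int main(void) {
  DemoAvgInter inter = {.sum = 30.0, .num = 3};
  printf("merge stage avg: %g\n", demoAvgFinalize(DEMO_MERGE_STAGE, &inter, 0, 0));
  printf("normal stage avg: %g\n", demoAvgFinalize(DEMO_NORMAL_STAGE, &inter, 10.0, 4));
  return 0;
}
```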