taosdata / TDengine
Commit 571cccd7
Authored July 11, 2023 by Haojun Liao
refactor: do some internal refactor.
Parent: 62a763ea
1 changed file with 63 additions and 49 deletions

source/libs/stream/src/streamExec.c  (+63, -49)
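In short, the diff below moves the per-input-type executor setup that previously lived inline in streamExecForAll into a new static helper, doSetStreamInputBlock, and extends the post-checkpoint commit path in streamTryExec to record keptCheckpointId and to log the checkpoint id and version. After the change, the input setup in streamExecForAll reduces to roughly the following excerpt (taken from the second hunk; the surrounding batching loop is omitted):

    const SStreamQueueItem* pItem = pInput;
    qDebug("s-task:%s start to process batch of blocks, num:%d, type:%d", id, batchSize, pItem->type);

    int64_t currentVer = pTask->chkInfo.currentVer;
    doSetStreamInputBlock(pTask, pInput, &currentVer, id);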
@@ -378,6 +378,56 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) {
   return TSDB_CODE_SUCCESS;
 }
 
+// set input
+static void doSetStreamInputBlock(SStreamTask* pTask, const void* pInput, int64_t* pCurrentVer, const char* id) {
+  void* pExecutor = pTask->exec.pExecutor;
+  const SStreamQueueItem* pItem = pInput;
+
+  if (pItem->type == STREAM_INPUT__GET_RES) {
+    const SStreamTrigger* pTrigger = (const SStreamTrigger*)pInput;
+    qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
+  } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
+    ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);
+    const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)pInput;
+    qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
+    qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, id, pSubmit,
+           pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver);
+    ASSERT((*pCurrentVer) < pSubmit->submit.ver);
+    (*pCurrentVer) = pSubmit->submit.ver;
+  } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
+    const SStreamDataBlock* pBlock = (const SStreamDataBlock*)pInput;
+
+    SArray* pBlockList = pBlock->blocks;
+    int32_t numOfBlocks = taosArrayGetSize(pBlockList);
+    qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, id, numOfBlocks, pBlock->sourceVer);
+    qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK);
+  } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
+    const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)pInput;
+
+    SArray* pBlockList = pMerged->submits;
+    int32_t numOfBlocks = taosArrayGetSize(pBlockList);
+    qDebug("s-task:%s %p set (merged) submit blocks as a batch, numOfBlocks:%d, ver:%" PRId64, id, pTask, numOfBlocks,
+           pMerged->ver);
+    qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__MERGED_SUBMIT);
+    ASSERT((*pCurrentVer) < pMerged->ver);
+    (*pCurrentVer) = pMerged->ver;
+  } else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
+    const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)pInput;
+    qSetMultiStreamInput(pExecutor, pRefBlock->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
+  } else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
+    const SStreamCheckpoint* pCheckpoint = (const SStreamCheckpoint*)pInput;
+    qSetMultiStreamInput(pExecutor, pCheckpoint->pBlock, 1, STREAM_INPUT__CHECKPOINT);
+  } else {
+    ASSERT(0);
+  }
+}
+
 /**
  * todo: the batch of blocks should be tuned dynamic, according to the total elapsed time of each batch of blocks, the
  * appropriate batch of blocks should be handled in 5 to 10 sec.
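The todo comment kept above notes that the batch size should eventually be tuned dynamically so that each batch of blocks takes roughly 5 to 10 seconds to process. Below is a minimal sketch of what such tuning could look like; it is purely illustrative, and the function name, constants, and policy are assumptions rather than anything present in this commit:

    #include <stdint.h>

    // Illustrative sketch only (not part of this commit): choose the next batch size from the
    // elapsed time of the previous batch, aiming at a 5-10 second processing window per batch.
    static int32_t adjustBatchSize(int32_t prevBatchSize, int64_t elapsedMs) {
      const int64_t targetLowMs = 5000;    // lower bound of the target window
      const int64_t targetHighMs = 10000;  // upper bound of the target window

      if (elapsedMs <= 0 || prevBatchSize <= 0) {
        return (prevBatchSize > 0) ? prevBatchSize : 1;  // no signal yet; keep a sane value
      }
      if (elapsedMs < targetLowMs) {
        return prevBatchSize * 2;                        // batches finish too quickly: grow
      }
      if (elapsedMs > targetHighMs) {
        int32_t next = prevBatchSize / 2;                // batches take too long: shrink
        return (next > 0) ? next : 1;
      }
      return prevBatchSize;                              // already inside the 5-10s window
    }

A caller could feed it the taosGetTimestampMs() delta measured around each batch (the `st` timestamp that streamExecForAll already records) and use the result as the next batch size.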
@@ -418,52 +468,12 @@ int32_t streamExecForAll(SStreamTask* pTask) {
     }
 
     int64_t st = taosGetTimestampMs();
-    qDebug("s-task:%s start to process batch of blocks, num:%d", id, batchSize);
 
-    int64_t currentVer = pTask->chkInfo.currentVer;
-    {
-      // set input
-      void* pExecutor = pTask->exec.pExecutor;
-
-      const SStreamQueueItem* pItem = pInput;
-      if (pItem->type == STREAM_INPUT__GET_RES) {
-        const SStreamTrigger* pTrigger = (const SStreamTrigger*)pInput;
-        qSetMultiStreamInput(pExecutor, pTrigger->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
-      } else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
-        ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE);
-        const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)pInput;
-        qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
-        qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, id, pSubmit,
-               pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver);
-        ASSERT(currentVer < pSubmit->submit.ver);
-        currentVer = pSubmit->submit.ver;
-      } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
-        const SStreamDataBlock* pBlock = (const SStreamDataBlock*)pInput;
-
-        SArray* pBlockList = pBlock->blocks;
-        int32_t numOfBlocks = taosArrayGetSize(pBlockList);
-        qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, id, numOfBlocks, pBlock->sourceVer);
-        qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK);
-      } else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
-        const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)pInput;
-
-        SArray* pBlockList = pMerged->submits;
-        int32_t numOfBlocks = taosArrayGetSize(pBlockList);
-        qDebug("s-task:%s %p set (merged) submit blocks as a batch, numOfBlocks:%d, ver:%" PRId64, id, pTask,
-               numOfBlocks, pMerged->ver);
-        qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__MERGED_SUBMIT);
-        ASSERT(currentVer < pMerged->ver);
-        currentVer = pMerged->ver;
-      } else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
-        const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)pInput;
-        qSetMultiStreamInput(pExecutor, pRefBlock->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
-      } else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
-        const SStreamCheckpoint* pCheckpoint = (const SStreamCheckpoint*)pInput;
-        qSetMultiStreamInput(pExecutor, pCheckpoint->pBlock, 1, STREAM_INPUT__CHECKPOINT);
-      } else {
-        ASSERT(0);
-      }
-    }
+    const SStreamQueueItem* pItem = pInput;
+    qDebug("s-task:%s start to process batch of blocks, num:%d, type:%d", id, batchSize, pItem->type);
+
+    int64_t currentVer = pTask->chkInfo.currentVer;
+    doSetStreamInputBlock(pTask, pInput, &currentVer, id);
 
     int64_t resSize = 0;
     int32_t totalBlocks = 0;
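Because doSetStreamInputBlock receives currentVer by pointer, the submit and merged-submit branches can advance the caller's version in place while the other input types leave it untouched; the caller only observes the final value. A small illustration of that contract (the comments are descriptive; the surrounding loop is elided):

    int64_t currentVer = pTask->chkInfo.currentVer;        // version before this batch
    doSetStreamInputBlock(pTask, pInput, &currentVer, id);
    // For STREAM_INPUT__DATA_SUBMIT the helper sets currentVer to pSubmit->submit.ver,
    // for STREAM_INPUT__MERGED_SUBMIT to pMerged->ver; other input types do not change it.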
@@ -523,7 +533,6 @@ int32_t streamTryExec(SStreamTask* pTask) {
       return -1;
     }
 
-    // todo the task should be commit here
     atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
     qDebug("s-task:%s exec completed, status:%s, sched-status:%d", pTask->id.idStr,
            streamGetTaskStatusStr(pTask->status.taskStatus), pTask->status.schedStatus);
@@ -551,17 +560,22 @@ int32_t streamTryExec(SStreamTask* pTask) {
     if (code == TSDB_CODE_SUCCESS) {
       taosWLockLatch(&pTask->pMeta->lock);
 
+      ASSERT(pTask->chkInfo.keptCheckpointId < pTask->checkpointingId);
+      pTask->chkInfo.keptCheckpointId = pTask->checkpointingId;
+
       streamMetaSaveTask(pTask->pMeta, pTask);
       if (streamMetaCommit(pTask->pMeta) < 0) {
         taosWUnLockLatch(&pTask->pMeta->lock);
-        qError("s-task:%s failed to commit stream meta, since %s", pTask->id.idStr, terrstr());
+        qError("s-task:%s failed to commit stream meta after do checkpoint, checkpointId:%" PRId64 ", ver:%" PRId64
+               ", since %s", pTask->id.idStr, pTask->chkInfo.keptCheckpointId, pTask->chkInfo.version, terrstr());
         return -1;
       } else {
         taosWUnLockLatch(&pTask->pMeta->lock);
         qDebug("s-task:%s commit after checkpoint generating", pTask->id.idStr);
       }
 
-      qInfo("vgId:%d s-task:%s commit task status after checkpoint completed", pMeta->vgId, pTask->id.idStr);
+      qInfo("vgId:%d s-task:%s commit task status after checkpoint completed, checkpointId:%" PRId64 ", ver:%" PRId64,
+            pMeta->vgId, pTask->id.idStr, pTask->chkInfo.keptCheckpointId, pTask->chkInfo.version);
     } else {
       // todo: let's retry send rsp to upstream/mnode
     }
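The last hunk tightens the post-checkpoint commit path: the task's keptCheckpointId is advanced (and asserted to be strictly increasing) before the task metadata is saved and committed, and the meta write latch is held across the whole save/commit so a failed commit can be reported with the checkpoint id and version. A condensed paraphrase of that control flow, with the logging trimmed and descriptive comments added (not a drop-in replacement for the code above):

    taosWLockLatch(&pTask->pMeta->lock);                               // serialize meta updates

    ASSERT(pTask->chkInfo.keptCheckpointId < pTask->checkpointingId);  // checkpoint ids must move forward
    pTask->chkInfo.keptCheckpointId = pTask->checkpointingId;          // remember the checkpoint being committed

    streamMetaSaveTask(pTask->pMeta, pTask);                           // stage the updated task state
    if (streamMetaCommit(pTask->pMeta) < 0) {                          // persist; report and bail out on failure
      taosWUnLockLatch(&pTask->pMeta->lock);
      return -1;
    }
    taosWUnLockLatch(&pTask->pMeta->lock);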