Commit 5dae6da0 (unverified)
Authored Apr 01, 2022 by Leo Chen; committed via GitHub, Apr 01, 2022
[new-exec] move WaitEvent/RecordEvent into try-catch (#41222)

* move WaitEvent/RecordEvent into try-catch
* refine supportNpu
Parent: e6a19aea
Showing 3 changed files with 60 additions and 36 deletions (+60, −36):
* paddle/fluid/framework/new_executor/interpretercore.cc (+7, −4)
* paddle/fluid/framework/operator.cc (+50, −0)
* paddle/fluid/framework/operator.h (+3, −32)
paddle/fluid/framework/new_executor/interpretercore.cc
@@ -501,7 +501,7 @@ void InterpreterCore::RunInstruction(const Instruction& instr_node) {
   }
   // for debug nan/inf
-  if (FLAGS_check_nan_inf) {
+  if (op_with_kernel != nullptr && FLAGS_check_nan_inf) {
     VLOG(4) << "Check nan/inf";
     framework::details::CheckOpHasNanOrInf(*op, *global_scope_,
@@ -542,10 +542,12 @@ void InterpreterCore::ExecuteInstructionList(
    if (exception_holder_.Type() != "EOF") {
      async_work_queue_->Cancel();
    }
    VLOG(4) << "Cancel ok";
    PADDLE_ENFORCE_EQ(
        main_thread_blocker_.Clear(), 0,
        platform::errors::PreconditionNotMet(
            "main_thread_blocker_.Clear() return -1, clear failed"));
    VLOG(4) << "clear ok";
    exception_holder_.ReThrow();
  }
}
@@ -637,15 +639,18 @@ void InterpreterCore::RunInstructionAsync(
   auto* op = instr_node.OpBase();
   platform::RecordEvent instruction_event(
       op->Type(), platform::TracerEventType::Operator, 1);
-  interpreter::WaitEvent(instr_node, place_);
   try {
+    interpreter::WaitEvent(instr_node, place_);
     RunInstruction(instr_node);
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
     RecordStreamForGC(instr_node);
 #endif
     CheckGC(instr_node, atomic_var_ref);
+    interpreter::RecordEvent(instr_node, place_);
   } catch (platform::EnforceNotMet& ex) {
     framework::InsertCallStackInfo(op->Type(), op->Attrs(), &ex);
     exception_holder_.Catch(std::make_exception_ptr(std::move(ex)));
@@ -677,8 +682,6 @@ void InterpreterCore::RunInstructionAsync(
     }
   }
-  interpreter::RecordEvent(instr_node, place_);
   RunNextInstructions(instr_node, &ready_ops, atomic_deps, atomic_var_ref);
 }
 }
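
Taken together, the interpretercore.cc hunks move both the event wait and the event record inside the same try block as the kernel launch, so a failure in either step is caught, annotated with the op's call stack, and stored in exception_holder_ instead of escaping the async worker. The stand-alone sketch below only illustrates that behavior; WaitEvent, RunInstruction, RecordEvent, and ExceptionHolder here are simplified stand-ins invented for the example, not Paddle's real interpreter APIs.

// Minimal sketch of the "wait/run/record inside one try" pattern.
// All names below are hypothetical stand-ins, not Paddle code.
#include <exception>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <string>

// Any of these three steps may throw, e.g. on a device error.
void WaitEvent(const std::string& instr) {
  if (instr == "bad_wait") throw std::runtime_error("wait failed: " + instr);
}
void RunInstruction(const std::string& instr) {
  if (instr == "bad_run") throw std::runtime_error("run failed: " + instr);
}
void RecordEvent(const std::string& instr) {
  if (instr == "bad_record") throw std::runtime_error("record failed: " + instr);
}

// Simplified analogue of an exception holder: keeps the first exception
// thrown by any worker so the main thread can rethrow it later.
class ExceptionHolder {
 public:
  void Catch(std::exception_ptr e) {
    std::lock_guard<std::mutex> guard(mu_);
    if (!exception_) exception_ = e;
  }
  void ReThrowIfCaught() {
    std::exception_ptr e;
    {
      std::lock_guard<std::mutex> guard(mu_);
      e = exception_;
    }
    if (e) std::rethrow_exception(e);
  }

 private:
  std::mutex mu_;
  std::exception_ptr exception_;
};

void RunInstructionAsync(const std::string& instr, ExceptionHolder* holder) {
  try {
    WaitEvent(instr);    // inside try: a wait failure is captured too
    RunInstruction(instr);
    RecordEvent(instr);  // likewise for recording the completion event
  } catch (...) {
    holder->Catch(std::current_exception());
  }
}

int main() {
  ExceptionHolder holder;
  RunInstructionAsync("ok", &holder);
  RunInstructionAsync("bad_wait", &holder);  // failure is captured, not leaked
  try {
    holder.ReThrowIfCaught();
  } catch (const std::exception& e) {
    std::cout << "caught on main thread: " << e.what() << "\n";
  }
  return 0;
}

With the wait outside the try (the old layout), the "bad_wait" case would propagate out of the worker instead of being rethrown on the main thread.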
paddle/fluid/framework/operator.cc
@@ -1120,6 +1120,56 @@ static void CheckTensorNANOrInf(const std::string& op_type,
                       op_type, name));
 }

+bool OperatorWithKernel::SupportGPU() const {
+  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
+      phi::TransToPhiKernelName(type_));
+  auto has_phi_kernel =
+      std::any_of(phi_kernels.begin(), phi_kernels.end(),
+                  [](phi::KernelKeyMap::const_reference kern_pair) {
+                    return kern_pair.first.backend() == phi::Backend::GPU;
+                  });
+  if (has_phi_kernel) {
+    return true;
+  } else {
+    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
+    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
+      return false;
+    } else {
+      auto& op_kernels = kernel_iter->second;
+      return std::any_of(
+          op_kernels.begin(), op_kernels.end(),
+          [](OpKernelMap::const_reference kern_pair) {
+            return platform::is_gpu_place(kern_pair.first.place_);
+          });
+    }
+  }
+}
+
+bool OperatorWithKernel::SupportNPU() const {
+  auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
+      phi::TransToPhiKernelName(type_));
+  auto has_phi_kernel =
+      std::any_of(phi_kernels.begin(), phi_kernels.end(),
+                  [](phi::KernelKeyMap::const_reference kern_pair) {
+                    return kern_pair.first.backend() == phi::Backend::NPU;
+                  });
+  if (has_phi_kernel) {
+    return true;
+  } else {
+    auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
+    if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
+      return false;
+    } else {
+      auto& op_kernels = kernel_iter->second;
+      return std::any_of(
+          op_kernels.begin(), op_kernels.end(),
+          [](OpKernelMap::const_reference kern_pair) {
+            return platform::is_npu_place(kern_pair.first.place_);
+          });
+    }
+  }
+}
+
 bool OperatorWithKernel::SupportsMKLDNN(
     const proto::VarType::Type data_type) const {
   auto op_kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
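
Both new definitions follow the same two-step lookup: scan the phi kernel map returned by phi::KernelFactory::Instance().SelectKernelMap() for a kernel registered on the target backend, and only if none exists fall back to scanning the legacy OperatorWithKernel::AllOpKernels() entry by place. The sketch below restates that pattern in a self-contained form; the registry types, SupportsBackend, and the string place names are hypothetical stand-ins for illustration, not Paddle types.

// Minimal sketch of the "phi registry first, legacy registry as fallback"
// lookup. All types and names here are invented for the example.
#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

enum class Backend { CPU, GPU, NPU };

// Stand-ins for the phi kernel map and the legacy OpKernelMap.
using PhiKernelMap = std::vector<Backend>;         // backends with a phi kernel
using LegacyKernelMap = std::vector<std::string>;  // place names with a legacy kernel

bool SupportsBackend(const std::string& op_type, Backend backend,
                     const std::map<std::string, PhiKernelMap>& phi_registry,
                     const std::map<std::string, LegacyKernelMap>& legacy_registry,
                     const std::string& place_name) {
  // 1) Check the phi-style registry for the target backend.
  auto phi_it = phi_registry.find(op_type);
  if (phi_it != phi_registry.end() &&
      std::any_of(phi_it->second.begin(), phi_it->second.end(),
                  [backend](Backend b) { return b == backend; })) {
    return true;
  }
  // 2) Otherwise fall back to the legacy per-op kernel map, keyed by place.
  auto legacy_it = legacy_registry.find(op_type);
  if (legacy_it == legacy_registry.end()) return false;
  return std::any_of(legacy_it->second.begin(), legacy_it->second.end(),
                     [&place_name](const std::string& p) { return p == place_name; });
}

int main() {
  std::map<std::string, PhiKernelMap> phi_registry = {
      {"relu", {Backend::CPU, Backend::GPU}}};
  std::map<std::string, LegacyKernelMap> legacy_registry = {
      {"custom_op", {"NPUPlace"}}};

  std::cout << std::boolalpha
            << SupportsBackend("relu", Backend::GPU, phi_registry,
                               legacy_registry, "GPUPlace")
            << "\n"  // true: found in the phi-style registry
            << SupportsBackend("custom_op", Backend::NPU, phi_registry,
                               legacy_registry, "NPUPlace")
            << "\n";  // true: found only in the legacy registry
  return 0;
}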
paddle/fluid/framework/operator.h
@@ -560,39 +560,10 @@ class OperatorWithKernel : public OperatorBase {
     return g_all_op_kernels;
   }

-  bool SupportGPU() const override {
-    auto phi_kernels = phi::KernelFactory::Instance().SelectKernelMap(
-        phi::TransToPhiKernelName(type_));
-    auto has_phi_kernel =
-        std::any_of(phi_kernels.begin(), phi_kernels.end(),
-                    [](phi::KernelKeyMap::const_reference kern_pair) {
-                      return kern_pair.first.backend() == phi::Backend::GPU;
-                    });
-    if (has_phi_kernel) {
-      return true;
-    } else {
-      auto kernel_iter = OperatorWithKernel::AllOpKernels().find(type_);
-      if (kernel_iter == OperatorWithKernel::AllOpKernels().end()) {
-        return false;
-      } else {
-        auto& op_kernels = kernel_iter->second;
-        return std::any_of(
-            op_kernels.begin(), op_kernels.end(),
-            [](OpKernelMap::const_reference kern_pair) {
-              return platform::is_gpu_place(kern_pair.first.place_);
-            });
-      }
-    }
-  }
+  bool SupportGPU() const override;
+
+  bool SupportNPU() const override;

-  bool SupportNPU() const override {
-    // TODO(zhiqiu): support phi if needed?
-    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
-    return std::any_of(op_kernels.begin(), op_kernels.end(),
-                       [](OpKernelMap::const_reference kern_pair) {
-                         return platform::is_npu_place(kern_pair.first.place_);
-                       });
-  }

   bool SupportMLU() const override {
     // TODO(zhiqiu): support phi if needed?
     auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);