Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle) · Commits · 7c555f4e
Commit 7c555f4e (unverified)
Authored Mar 31, 2022 by 0x45f · Committed by GitHub on Mar 31, 2022
Parent: 1faefc93

Fix test_run_program_op.py (#41141)
Showing 2 changed files with 34 additions and 28 deletions (+34 −28):

  paddle/fluid/eager/to_static/run_program_op_node.h (+8 −3)
  python/paddle/fluid/tests/unittests/test_run_program_op.py (+26 −25)
paddle/fluid/eager/to_static/run_program_op_node.h

@@ -185,7 +185,13 @@ inline void RunProgramAPI(
   VLOG(2) << "RunProgramOpKernel Compute";
   auto start_op_index = BOOST_GET_CONST(int64_t, attrs.at("start_op_index"));
   auto end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
-  auto is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+  // In the original run_program OP, the default value of the is_test
+  // attribute is false, we should check if there is is_test parameter
+  // in attrs
+  auto is_test = false;
+  if (attrs.count("is_test")) {
+    is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+  }
   auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));

   // NOTE(chenweihang): In order not to add new variable type, use vector
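The new guard makes is_test an optional attribute: a missing key now falls back to false instead of letting attrs.at("is_test") throw. A minimal Python model of that lookup-with-default (a plain dict stands in for the C++ attribute map):

# Pure-Python model of the guard above: a missing 'is_test' entry defaults to
# False, mirroring `attrs.count("is_test")` followed by `attrs.at("is_test")`.
def read_is_test(attrs):
    return attrs['is_test'] if 'is_test' in attrs else False

assert read_is_test({'program_id': 42}) is False   # key absent -> default
assert read_is_test({'is_test': True}) is True     # key present -> honored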
@@ -447,12 +453,11 @@ class GradNodeRunProgram : public egr::GradNodeBase {
       const std::vector<paddle::experimental::Tensor> &param,
       std::vector<paddle::experimental::Tensor> *param_grad) {
     for (auto &t : param) {
-      auto t_meta = egr::EagerUtils::unsafe_autograd_meta(t);
       auto t_grad = egr::EagerUtils::unsafe_autograd_meta(t)->Grad();
       // In eager mode, the number of param_grad should be the same as
       // param, so here an empty Tensor is added for the param with
       // stop_gradient=True
-      if (t_meta->StopGradient()) {
+      if (!t_grad.defined()) {
         param_grad->emplace_back();
       } else if (t_grad.is_dense_tensor()) {
         param_grad->emplace_back(std::make_shared<phi::DenseTensor>());
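The Grad() of a parameter created with stop_gradient=True is never defined, so the hunk pushes an empty tensor to keep param_grad index-aligned with param. A small stand-alone sketch of that alignment rule (plain Python containers, not Paddle tensors):

# Keep the i-th grad aligned with the i-th parameter: parameters whose grad
# was never produced still occupy a slot (None stands in for the empty Tensor
# created by `param_grad->emplace_back()`).
def construct_param_grads(param_names, grads_by_name):
    param_grad = []
    for name in param_names:
        param_grad.append(grads_by_name.get(name))  # None if undefined
    assert len(param_grad) == len(param_names)
    return param_grad

grads = construct_param_grads(['w', 'b_frozen'], {'w': [0.1, 0.2]})
assert grads == [[0.1, 0.2], None]  # frozen param keeps its (empty) slot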
python/paddle/fluid/tests/unittests/test_run_program_op.py

@@ -20,10 +20,12 @@ import numpy as np
 import six

 import paddle
 from paddle import _C_ops
 import paddle.fluid as fluid
 from paddle import compat as cpt
 from paddle.fluid import core, framework, executor
+from paddle.fluid.layers.utils import _hash_with_id
+from paddle.fluid.framework import _in_eager_mode_

 paddle.enable_static()
@@ -95,11 +97,9 @@ class RunProgramOpTest(unittest.TestCase):
         return fluid.default_main_program().desc, fwd_op_num

     def prepare_attrs(self):
-        return {
-            'global_block': self.program_desc.block(0),
-            'start_op_index': 0,
-            'end_op_index': self.fwd_op_num
-        }
+        return ('global_block', self.program_desc.block(0), 'start_op_index',
+                0, 'end_op_index', self.fwd_op_num, 'program_id',
+                _hash_with_id(self.program_desc))

     def get_param_grad_names(self):
         grad_names = []
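prepare_attrs now returns the flat name/value tuple that the eager _C_ops entry point expects (with program_id folded in via _hash_with_id) instead of the dict the legacy tracer consumed. A sketch of how such a tuple round-trips into a map on the consuming side; attrs_to_map is a hypothetical helper, only the interleaving convention comes from the diff:

# Rebuild a name -> value map from the interleaved pairs passed via
# `*self.attrs`; note that 'is_test' is simply absent, which is why the C++
# side grew the attrs.count("is_test") guard in the first hunk.
def attrs_to_map(*attr_pairs):
    return dict(zip(attr_pairs[0::2], attr_pairs[1::2]))

attrs = ('global_block', None, 'start_op_index', 0, 'end_op_index', 5,
         'program_id', 1234)
m = attrs_to_map(*attrs)
assert m['end_op_index'] == 5 and 'is_test' not in m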
@@ -127,8 +127,12 @@ class RunProgramOpTest(unittest.TestCase):
     def prepare_dygraph_input(self, place, return_param_list=False):
         def create_var_base(is_input, name, np_value, stop_gradient):
-            var = core.VarBase(
-                value=np_value, name=name, place=place, zero_copy=True)
+            if _in_eager_mode_:
+                var = core.eager.Tensor(
+                    value=np_value, name=name, place=place, zero_copy=True)
+            else:
+                var = core.VarBase(
+                    value=np_value, name=name, place=place, zero_copy=True)
             var.stop_gradient = stop_gradient
             return var
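create_var_base now branches on _in_eager_mode_ so the same test drives both tensor implementations. A self-contained sketch of this dual-mode factory pattern (stand-in classes, not core.eager.Tensor / core.VarBase):

# Select the tensor class from a module-level mode flag, then apply the
# common post-construction setup, as create_var_base does above.
_IN_EAGER_MODE = True  # stand-in for paddle.fluid.framework._in_eager_mode_

class LegacyVarBase:
    def __init__(self, value, name):
        self.value, self.name, self.stop_gradient = value, name, False

class EagerTensor(LegacyVarBase):
    pass

def create_var(value, name, stop_gradient):
    cls = EagerTensor if _IN_EAGER_MODE else LegacyVarBase
    var = cls(value, name)
    var.stop_gradient = stop_gradient  # common setup shared by both paths
    return var

v = create_var([1.0], 'w', stop_gradient=False)
assert isinstance(v, EagerTensor) and v.stop_gradient is False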
@@ -162,12 +166,15 @@ class RunProgramOpTest(unittest.TestCase):
         for name in self.output_names['Out']:
             outputs['Out'].append(create_var_base(False, name))

-        outputs['OutScope'] = framework._varbase_creator(
-            type=core.VarDesc.VarType.STEP_SCOPES,
-            name="program_out_scope",
-            persistable=True)
-        inner_scope = core.Scope()
-        outputs['OutScope'].value().set_scope(inner_scope)
+        if _in_eager_mode_:
+            outputs['OutScope'] = [core.Scope()]
+        else:
+            outputs['OutScope'] = framework._varbase_creator(
+                type=core.VarDesc.VarType.STEP_SCOPES,
+                name="program_out_scope",
+                persistable=True)
+            inner_scope = core.Scope()
+            outputs['OutScope'].value().set_scope(inner_scope)

         outputs['DOut'] = [create_var_base(False, "Fake_var")]
         return outputs
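In eager mode the op receives its output scope as a plain Python list holding a core.Scope(), while the legacy path wrapped an inner scope in a STEP_SCOPES variable. A stand-in sketch of the two container shapes the test must produce (hypothetical types, not Paddle's):

class Scope:  # stand-in for core.Scope
    pass

def prepare_out_scope(in_eager_mode):
    if in_eager_mode:
        return [Scope()]  # eager: bare list of scopes
    # legacy: a STEP_SCOPES holder wrapping the inner scope (modeled as a dict)
    return {'type': 'STEP_SCOPES', 'inner': Scope()}

assert isinstance(prepare_out_scope(True)[0], Scope)
assert prepare_out_scope(False)['type'] == 'STEP_SCOPES'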
@@ -175,34 +182,28 @@ class RunProgramOpTest(unittest.TestCase):
     def calc_dygraph_output(self, place):
         self.program_desc, self.fwd_op_num = self.get_program_desc()
         self.attrs = self.prepare_attrs()
-        self.attrs['program_id'] = _hash_with_id(self.program_desc)

         with fluid.dygraph.guard(place):
             inputs = self.prepare_dygraph_input(place)
             outputs = self.prepare_dygraph_output()

-            framework._dygraph_tracer().trace_op(
-                type=self.op_type,
-                inputs=inputs,
-                outputs=outputs,
-                attrs=self.attrs)
+            _C_ops.run_program(inputs['X'], inputs['Params'],
+                               outputs['Out'], outputs['OutScope'],
+                               outputs['DOut'], *self.attrs)

             return outputs['Out']

     def calc_dygraph_grad(self, place):
         self.program_desc, self.fwd_op_num = self.get_program_desc()
         self.attrs = self.prepare_attrs()
-        self.attrs['program_id'] = _hash_with_id(self.program_desc)

         with fluid.dygraph.guard(place):
             # Step 1. run forward
             inputs, input_param_list = self.prepare_dygraph_input(place, True)
             outputs = self.prepare_dygraph_output()

-            framework._dygraph_tracer().trace_op(
-                type=self.op_type,
-                inputs=inputs,
-                outputs=outputs,
-                attrs=self.attrs)
+            _C_ops.run_program(inputs['X'], inputs['Params'],
+                               outputs['Out'], outputs['OutScope'],
+                               outputs['DOut'], *self.attrs)

             for param in input_param_list:
                 var_type = self._get_grad_vartype(param.name)
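Both calc_dygraph_output and calc_dygraph_grad now call _C_ops.run_program directly instead of going through framework._dygraph_tracer().trace_op. A pure-Python mock of the call-site difference; the function bodies are stand-ins, only the argument shapes follow the diff:

def trace_op(type, inputs, outputs, attrs):          # legacy style: dict attrs
    assert isinstance(attrs, dict)

def run_program(x, params, out, out_scope, dout, *attrs):  # eager style
    assert len(attrs) % 2 == 0, "interleaved name/value attr pairs"

# The flat tuple from prepare_attrs() is spliced into the positional tail:
run_program([], [], [], [], [], 'start_op_index', 0, 'end_op_index', 5)
trace_op('run_program', {}, {}, {'start_op_index': 0})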