BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit ec45a0a5
Authored on Jan 16, 2023 by Jiabin Yang; committed via GitHub on Jan 16, 2023.
Revert "[static code gen]Add phi and fluid info in static code gen (#49763)"
This reverts commit
4d5265b8
.
上级
bd03652f
Showing 13 changed files with 224 additions and 236 deletions (+224 −236).
paddle/fluid/framework/details/op_registry.h                      +2   -2
paddle/fluid/operators/elementwise/elementwise_add_op.cc          +2   -2
paddle/fluid/operators/elementwise/elementwise_div_op.cc          +2   -2
paddle/fluid/operators/elementwise/elementwise_sub_op.cc          +2   -2
paddle/fluid/operators/generator/filters.py                       +6   -17
paddle/fluid/operators/generator/generate_op.py                   +101 -92
paddle/fluid/operators/generator/generate_sparse_op.py            +2   -14
paddle/fluid/operators/generator/generate_static_op.py            +3   -9
paddle/fluid/operators/generator/parse_utils.py                   +8   -7
paddle/fluid/operators/generator/templates/op.c.j2                +5   -3
paddle/fluid/operators/generator/templates/operator_utils.c.j2    +86  -81
paddle/fluid/prim/tests/test_static_prim.cc                       +2   -2
paddle/fluid/prim/utils/static/composite_grad_desc_maker.h        +3   -3
paddle/fluid/framework/details/op_registry.h

@@ -63,7 +63,7 @@ using OpRegistryClasses = std::tuple<  // NOLINT
     TypePair<OpProtoAndCheckerMaker, kOpProtoAndCheckerMaker>,    // NOLINT
     TypePair<GradOpDescMakerBase, kGradOpDescMaker>,              // NOLINT
     TypePair<imperative::GradOpBaseMakerBase, kGradOpBaseMaker>,  // NOLINT
-    TypePair<prim::CompositeGradOpMakerBase, kGradCompOpDescMaker>,  // NOLINT
+    TypePair<prim::GradCompositeOpMakerBase, kGradCompOpDescMaker>,  // NOLINT
     TypePair<VarTypeInference, kVarTypeInference>,                // NOLINT
     TypePair<InferShapeBase, kShapeInference>,                    // NOLINT
     TypePair<InplaceOpInference, kInplaceOpInference>,            // NOLINT

@@ -262,7 +262,7 @@ struct OpInfoFiller<T, kGradCompOpDescMaker> {
         info->grad_comp_op_maker_,
         nullptr,
         platform::errors::AlreadyExists(
-            "CompositeGradOpMakerBase of %s has been registered", op_type));
+            "GradCompositeOpMakerBase of %s has been registered", op_type));
     info->grad_comp_op_maker_ =
         [](const OpDesc& fwd_op,
paddle/fluid/operators/elementwise/elementwise_add_op.cc

@@ -52,8 +52,8 @@ class ElementwiseAddOpMaker : public ElementwiseOpMaker {
 };

 class ElementwiseAddGradCompositeOpMaker
-    : public prim::CompositeGradOpMakerBase {
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+    : public prim::GradCompositeOpMakerBase {
+  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;

  public:
  void Apply() override {
paddle/fluid/operators/elementwise/elementwise_div_op.cc

@@ -68,8 +68,8 @@ class ElementwiseDivGradOpMaker : public framework::SingleGradOpMaker<T> {
 };

 class ElementwiseDivGradCompositeOpMaker
-    : public prim::CompositeGradOpMakerBase {
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+    : public prim::GradCompositeOpMakerBase {
+  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;

  public:
  void Apply() override {
paddle/fluid/operators/elementwise/elementwise_sub_op.cc

@@ -55,8 +55,8 @@ class ElementwiseSubOpMaker : public ElementwiseOpMaker {
 };

 class ElementwiseSubGradCompositeOpMaker
-    : public prim::CompositeGradOpMakerBase {
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+    : public prim::GradCompositeOpMakerBase {
+  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;

  public:
  void Apply() override {
paddle/fluid/operators/generator/filters.py

@@ -14,7 +14,6 @@
 import itertools
 import re
-from typing import Dict, List

 from type_mapping import (
     attr_types_map,

@@ -138,23 +137,17 @@ def to_composite_grad_opmaker_name(backward_op_name):
     for i in range(len(words)):
         words[i] = words[i].strip()
         words[i] = words[i].capitalize()
-    composite_grad_opmaker_name = "".join(word for word in words)
-    composite_grad_opmaker_name += "CompositeGradOpMaker"
+    composite_grad_opmaker_name = words[0] + "Composite"
+    composite_grad_opmaker_name += "".join(word for word in words[1:])
+    composite_grad_opmaker_name += "OpMaker"
     return composite_grad_opmaker_name


-def to_variable_names(dict_list: List[Dict], key: str) -> List[str]:
-    names = []
-    for var in dict_list:
-        names.append(var[key])
-    return names
-
-
 def cartesian_prod_attrs(attrs):
     items = []
     for attr in attrs:
         type_name = attr["typename"]
-        name = attr["fluid_name"]
+        name = attr["name"]
         if type_name == "Scalar":
             items.append((name, to_scalar_tensor_name(attr)))
         elif type_name == "IntArray":

@@ -183,15 +176,11 @@ def cartesian_prod_attrs(attrs):
 def cartesian_prod_mapping(op):
     kernels = op["kernel"]["func"]
     inputs = [
-        x["fluid_name"]
-        for x in op["inputs"]
-        if x["fluid_name"] in op["kernel"]["param"]
+        x["name"] for x in op["inputs"] if x["name"] in op["kernel"]["param"]
     ]
     inputs = [to_opmaker_name_cstr(input) for input in inputs]
     attrs = cartesian_prod_attrs(op["attrs"])
-    outputs = [
-        to_opmaker_name_cstr(output["fluid_name"]) for output in op["outputs"]
-    ]
+    outputs = [to_opmaker_name_cstr(output["name"]) for output in op["outputs"]]

     def vec(items):
         return "{" + ', '.join(items) + "}"
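Note: the two variants of to_composite_grad_opmaker_name differ only in where "Composite" lands in the generated class name. A minimal standalone sketch comparing them ("tanh_grad" is just an illustrative op name, not taken from this diff):

    def removed_variant(backward_op_name):
        # variant deleted by this revert: <PascalName>CompositeGradOpMaker
        words = [w.strip().capitalize() for w in backward_op_name.split("_")]
        return "".join(words) + "CompositeGradOpMaker"

    def restored_variant(backward_op_name):
        # variant restored by this revert: <First>Composite<Rest>OpMaker
        words = [w.strip().capitalize() for w in backward_op_name.split("_")]
        return words[0] + "Composite" + "".join(words[1:]) + "OpMaker"

    print(removed_variant("tanh_grad"))   # TanhGradCompositeGradOpMaker
    print(restored_variant("tanh_grad"))  # TanhCompositeGradOpMaker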
paddle/fluid/operators/generator/generate_op.py

@@ -28,7 +28,6 @@ from filters import (
     to_opmaker_name_cstr,
     to_pascal_case,
     to_scalar_tensor_name,
-    to_variable_names,
 )
 from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from parse_utils import to_named_dict
@@ -61,7 +60,6 @@ env.filters["to_input_name"] = to_input_name
 env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
 env.filters["cartesian_prod_mapping"] = cartesian_prod_mapping
 env.filters["to_composite_grad_opmaker_name"] = to_composite_grad_opmaker_name
-env.filters["to_variable_names"] = to_variable_names
 env.tests["base_op"] = is_base_op
 env.tests["composite_op"] = is_composite_op
 env.tests["vec"] = is_vec
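Note: all three generator scripts extend a shared Jinja2 Environment this way — filters transform values inside {{ ... }} expressions, tests back {% if x is ... %} checks. A self-contained sketch of the mechanism (the lambdas are stand-ins, not Paddle's real helpers):

    from jinja2 import Environment, StrictUndefined

    env = Environment(undefined=StrictUndefined)
    # a filter: transforms a value inside an expression
    env.filters["to_pascal_case"] = lambda s: "".join(
        w.capitalize() for w in s.split("_")
    )
    # a test: a predicate usable as {% if op is base_op %}
    env.tests["base_op"] = lambda op: "invoke" not in op and "composite" not in op

    tmpl = env.from_string(
        '{% if op is base_op %}{{ op["name"] | to_pascal_case }}Op{% endif %}'
    )
    print(tmpl.render(op={"name": "elementwise_add"}))  # ElementwiseAddOp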
@@ -159,26 +157,29 @@ def process_int_array(op_item, int_array_configs):
     ]


-def add_composite_info(ops, backward_ops, backward_op_dict):
-    # add backward composite name in forward
-    for op in ops + backward_ops:
-        if (
-            op["backward"] in backward_op_dict
-            and "composite" in backward_op_dict[op["backward"]]
-        ):
-            op["backward_composite"] = op["backward"]
-        else:
-            op["backward_composite"] = None
-
-
-# add fluid name in ops and backward ops info
-def add_fluid_name(dict_list):
-    for item in dict_list:
-        item["fluid_name"] = item["name"]
-
-
-# add fluid name of op and params for OpMaker
-def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
+def parse_composite_info(ops, backward_ops, backward_op_dict):
+    for op in ops:
+        if "backward" in op:
+            op["phi_backward"] = op["backward"]
+    for backward_op in backward_ops:
+        if "backward" in backward_op:
+            backward_op["phi_backward"] = backward_op["backward"]
+    for backward_op_name, op_dict in backward_op_dict.items():
+        if "composite" not in op_dict:
+            continue
+        op_dict["composite"]["phi_inputs"] = []
+        op_dict["composite"]["phi_attrs"] = []
+        op_dict["composite"]["phi_outputs"] = []
+        for input in op_dict["inputs"]:
+            op_dict["composite"]["phi_inputs"].append(input['name'])
+        for attr in op_dict["attrs"]:
+            op_dict["composite"]["phi_attrs"].append(attr['name'])
+        for output in op_dict["outputs"]:
+            op_dict["composite"]["phi_outputs"].append(output['name'])
+
+
+# replace name of op and params for OpMaker
+def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
     def get_phi_and_fluid_op_name(op_item):
         names = op_item.split('(')
         if len(names) == 1:
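Note: the restored parse_composite_info only records the phi-side argument names on each composite entry. A self-contained sketch of its effect on one invented backward-op record:

    bw_op_dict = {
        "tanh_grad": {
            "inputs": [{"name": "out"}, {"name": "out_grad"}],
            "attrs": [],
            "outputs": [{"name": "x_grad"}],
            "composite": {"func_name": "tanh_grad",
                          "func_args": "out, out_grad, x_grad"},
        }
    }

    for _, op_dict in bw_op_dict.items():
        if "composite" not in op_dict:
            continue
        comp = op_dict["composite"]
        comp["phi_inputs"] = [i["name"] for i in op_dict["inputs"]]
        comp["phi_attrs"] = [a["name"] for a in op_dict["attrs"]]
        comp["phi_outputs"] = [o["name"] for o in op_dict["outputs"]]

    print(bw_op_dict["tanh_grad"]["composite"]["phi_inputs"])
    # ['out', 'out_grad']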
@@ -186,14 +187,12 @@ def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
         else:
             return names[0].strip(), names[1].split(')')[0].strip()

-    def add_op_param_name(op_args, args_alias_map):
+    def update_op_param_name(op_args, args_alias_map):
         for item in op_args:
             if item['name'] in args_alias_map:
-                item['fluid_name'] = args_alias_map[item['name']]
-            else:
-                item['fluid_name'] = item['name']
+                item['name'] = args_alias_map[item['name']]

-    def add_grad_args_name(op_args, args_alias_map):
+    def update_grad_args_name(op_args, args_alias_map):
         for item in op_args:
             if (
                 item['name'].endswith('_grad')
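Note: get_phi_and_fluid_op_name splits the "phi_name (fluid_name)" spelling used by op_compat.yaml. A standalone re-implementation for illustration (the len == 1 branch is cut off in the hunk above; it is assumed here to return the single name twice):

    def get_phi_and_fluid_op_name(op_item):
        names = op_item.split('(')
        if len(names) == 1:
            return names[0].strip(), names[0].strip()
        return names[0].strip(), names[1].split(')')[0].strip()

    print(get_phi_and_fluid_op_name("matmul (matmul_v2)"))  # ('matmul', 'matmul_v2')
    print(get_phi_and_fluid_op_name("tanh"))                # ('tanh', 'tanh')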
@@ -202,12 +201,38 @@ def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                 args_alias_map[item['name']] = (
                     args_alias_map[item['name'][:-5]] + '_grad'
                 )
-                item['fluid_name'] = args_alias_map[item['name'][:-5]] + '_grad'
-            elif (
-                item['name'].endswith('_grad')
-                and item['name'][:-5] not in args_alias_map
-            ):
-                item['fluid_name'] = item['name']
+                item['name'] = args_alias_map[item['name'][:-5]] + '_grad'
+
+    def add_fluid_info_in_composite(composite_map, args_alias_map):
+        fluid_input_list = []
+        fluid_attr_list = []
+        fluid_output_list = []
+        # add fluid op inputs
+        for input in composite_map["phi_inputs"]:
+            if input in args_alias_map:
+                fluid_input_list.append(args_alias_map[input])
+            else:
+                fluid_input_list.append(input)
+        # add fluid op attrs
+        for attr in composite_map["phi_attrs"]:
+            if attr in args_alias_map:
+                fluid_attr_list.append(args_alias_map[attr])
+            else:
+                fluid_attr_list.append(attr)
+        # add fluid op outputs
+        for output in composite_map["phi_outputs"]:
+            if output in args_alias_map:
+                fluid_output_list.append(args_alias_map[output])
+            else:
+                fluid_output_list.append(output)
+        composite_map.update(
+            {
+                "fluid_inputs": fluid_input_list,
+                "fluid_attrs": fluid_attr_list,
+                "fluid_outputs": fluid_output_list,
+            }
+        )

     def get_param_list_alias(param_list, args_map):
         return [
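Note: add_fluid_info_in_composite applies one rule per name — use the op_compat alias when there is one, otherwise fall back to the phi name. A sketch with an invented alias map (the names are placeholders, not taken from op_compat.yaml):

    args_alias_map = {"x": "X", "x_grad": "X_grad"}

    composite_map = {
        "phi_inputs": ["x", "out_grad"],
        "phi_attrs": ["axis"],
        "phi_outputs": ["x_grad"],
    }

    # same alias-or-fallback rule as the function above
    for phi_key, fluid_key in [
        ("phi_inputs", "fluid_inputs"),
        ("phi_attrs", "fluid_attrs"),
        ("phi_outputs", "fluid_outputs"),
    ]:
        composite_map[fluid_key] = [
            args_alias_map.get(name, name) for name in composite_map[phi_key]
        ]

    print(composite_map["fluid_inputs"])   # ['X', 'out_grad']
    print(composite_map["fluid_outputs"])  # ['X_grad']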
@@ -262,15 +287,15 @@ def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
             op_item['kernel']['layout']['candidates'], args_name_map
         )

-    def add_grad_op_compat_name(grad_op_item, args_name_map):
-        add_op_param_name(grad_op_item['inputs'], args_name_map)
-        add_op_param_name(grad_op_item['outputs'], args_name_map)
-        add_op_param_name(grad_op_item['attrs'], args_name_map)
-        add_op_param_name(grad_op_item['forward']['inputs'], args_name_map)
-        add_op_param_name(grad_op_item['forward']['outputs'], args_name_map)
-        add_op_param_name(grad_op_item['forward']['attrs'], args_name_map)
-        add_grad_args_name(grad_op_item['inputs'], args_map)
-        add_grad_args_name(grad_op_item['outputs'], args_map)
+    def update_grad_op_compat_name(grad_op_item, args_name_map):
+        update_op_param_name(grad_op_item['inputs'], args_name_map)
+        update_op_param_name(grad_op_item['outputs'], args_name_map)
+        update_op_param_name(grad_op_item['attrs'], args_name_map)
+        update_op_param_name(grad_op_item['forward']['inputs'], args_name_map)
+        update_op_param_name(grad_op_item['forward']['outputs'], args_name_map)
+        update_op_param_name(grad_op_item['forward']['attrs'], args_name_map)
+        update_grad_args_name(grad_op_item['inputs'], args_map)
+        update_grad_args_name(grad_op_item['outputs'], args_map)

     for op_args in op_fluid_map_list:
         new_op_name, op_name = get_phi_and_fluid_op_name(op_args['op'])
@@ -315,32 +340,39 @@ def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                     int_array_configs[
                         op_args[key][args_item['name']]
                     ] = int_array_configs[args_item['name']]
-                    args_item['fluid_name'] = op_args[key][
-                        args_item['name']
-                    ]
+                    args_item['name'] = op_args[key][args_item['name']]
         if has_backward:
             for args_item in backward_op_item['forward'][key]:
                 if args_item['name'] in op_args[key]:
                     args_item['name'] = op_args[key][args_item['name']]

     forward_op_item["attr_dict"] = to_named_dict(forward_op_item["attrs"])
     update_common_params_name(
         forward_op_item, args_map, scalar_configs, int_array_configs
     )

     if has_backward:
-        # update fluid info in backward
-        add_grad_op_compat_name(backward_op_item, args_map)
+        update_grad_op_compat_name(backward_op_item, args_map)
         update_common_params_name(
             backward_op_item, args_map, scalar_configs, int_array_configs
         )
         backward_op_item["attr_dict"] = to_named_dict(
             backward_op_item["attrs"]
         )

         if 'backward' not in op_args:
             continue

         backward_op_list = op_args['backward'].split(',')
-        phi_bw_op_name, bw_op_name = get_phi_and_fluid_op_name(
-            backward_op_list[0]
-        )
-        if (
-            forward_op_item["backward_composite"] is not None
-            and phi_bw_op_name != bw_op_name
-        ):
-            forward_op_item["backward_composite"] = bw_op_name
+        # add fluid args name in composite map
+        for backward_op in backward_op_list:
+            if (
+                "composite"
+                in backward_op_dict[backward_op.split('(')[0].strip()]
+            ):
+                add_fluid_info_in_composite(
+                    backward_op_dict[backward_op]["composite"], args_map
+                )
+        _, bw_op_name = get_phi_and_fluid_op_name(backward_op_list[0])
         forward_op_item['backward'] = bw_op_name
         backward_op_item['op_name'] = bw_op_name
@@ -351,20 +383,18 @@ def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                 double_grad_op_name,
             ) = get_phi_and_fluid_op_name(backward_op_list[1])
             double_grad_item = backward_op_dict[phi_double_grad_op_name]
-            if (
-                backward_op_item["backward_composite"] is not None
-                and phi_double_grad_op_name != double_grad_op_name
-            ):
-                backward_op_item["backward_composite"] = double_grad_op_name
             backward_op_item['backward'] = double_grad_op_name
             double_grad_item['op_name'] = double_grad_op_name
-            add_grad_op_compat_name(double_grad_item, args_map)
+            update_grad_op_compat_name(double_grad_item, args_map)
             update_common_params_name(
                 double_grad_item,
                 args_map,
                 scalar_configs,
                 int_array_configs,
             )
             double_grad_item["attr_dict"] = to_named_dict(
                 double_grad_item["attrs"]
             )

             # for triple grad
             if len(backward_op_list) > 2:
@@ -373,22 +403,18 @@ def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                     triple_grad_op_name,
                 ) = get_phi_and_fluid_op_name(backward_op_list[2])
                 triple_grad_item = backward_op_dict[phi_triple_grad_op_name]
-                if (
-                    double_grad_item["backward_composite"] is not None
-                    and phi_triple_grad_op_name != triple_grad_op_name
-                ):
-                    double_grad_item["backward_composite"] = triple_grad_op_name
                 double_grad_item['backward'] = triple_grad_op_name
                 triple_grad_item['op_name'] = triple_grad_op_name
-                add_grad_op_compat_name(triple_grad_item, args_map)
+                update_grad_op_compat_name(triple_grad_item, args_map)
                 update_common_params_name(
                     triple_grad_item,
                     args_map,
                     scalar_configs,
                     int_array_configs,
                 )
                 triple_grad_item["attr_dict"] = to_named_dict(
                     triple_grad_item["attrs"]
                 )


 def process_invoke_op(forward_op_dict, backward_op_dict):
@@ -406,28 +432,20 @@ def process_invoke_op(forward_op_dict, backward_op_dict):
             for input_item in reuse_op['inputs']:
                 bw_op['invoke']['inputs'].append(
                     {
-                        'fluid_name': input_item['fluid_name'],
                         'name': input_item['name'],
                         'value': args_list[args_index],
                     }
                 )
                 args_index = args_index + 1
-            bw_fluid_attrs_set = [
-                item['fluid_name'] for item in bw_op['attrs']
-            ]
             for attr in reuse_op['attrs']:
                 if args_index < len(args_list):
                     attr_value = (
                         f"this->GetAttr(\"{args_list[args_index]}\")"
-                        if args_list[args_index] in bw_fluid_attrs_set
+                        if args_list[args_index] in bw_op['attr_dict']
                         else args_list[args_index]
                     )
                     bw_op['invoke']['attrs'].append(
-                        {
-                            'name': attr['name'],
-                            'fluid_name': attr['fluid_name'],
-                            'value': attr_value,
-                        }
+                        {'name': attr['name'], 'value': attr_value}
                     )
                     args_index = args_index + 1
                 else:
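Note: the restored condition consults bw_op['attr_dict'] to decide whether an invoke argument names an attribute of the reused op (emitted as a GetAttr call) or is a literal (emitted verbatim). A tiny sketch; the records are invented:

    bw_op = {"attr_dict": {"axis": {"typename": "int"}}}
    args_list = ["axis", "-1"]

    for arg in args_list:
        attr_value = (
            f"this->GetAttr(\"{arg}\")"  # named attr of the reused op
            if arg in bw_op["attr_dict"]
            else arg                     # literal, passed through
        )
        print(attr_value)
    # this->GetAttr("axis")
    # -1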
@@ -436,8 +454,7 @@ def process_invoke_op(forward_op_dict, backward_op_dict):
                 bw_op['invoke']['outputs'].append(
                     {
                         'name': output_item['name'],
-                        'fluid_name': output_item['fluid_name'],
-                        'value': bw_op['outputs'][idx]['fluid_name'],
+                        'value': bw_op['outputs'][idx]['name'],
                     }
                 )
@@ -490,26 +507,17 @@ def main(
     for op in ops:
         op['op_name'] = op['name']
-        add_fluid_name(op['inputs'])
-        add_fluid_name(op['attrs'])
-        add_fluid_name(op['outputs'])
     for bw_op in backward_ops:
         bw_op['op_name'] = bw_op['name']
-        add_fluid_name(bw_op['inputs'])
-        add_fluid_name(bw_op['attrs'])
-        add_fluid_name(bw_op['outputs'])
-        add_fluid_name(bw_op['forward']['inputs'])
-        add_fluid_name(bw_op['forward']['attrs'])
-        add_fluid_name(bw_op['forward']['outputs'])
         for bw_output in bw_op['outputs']:
             bw_output['drop_empty_grad'] = True

     # deal the drop_empty_grad of bw_op by op_compat.yaml
     parse_drop_empty_grad(op_fluid_map_list, backward_op_dict)

-    add_composite_info(ops, backward_ops, backward_op_dict)
+    parse_composite_info(ops, backward_ops, backward_op_dict)

-    add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict)
+    replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict)

     # prepare for invoke case
     process_invoke_op(forward_op_dict, backward_op_dict)
@@ -537,6 +545,7 @@ def main(
         ops=ops,
         backward_ops=backward_ops,
         op_dict=op_dict,
+        composite_gen_flag=True,
     )
     f.write(msg)

     ks_template = env.get_template('ks.c.j2')
paddle/fluid/operators/generator/generate_sparse_op.py

@@ -28,14 +28,12 @@ from filters import (
     to_opmaker_name_cstr,
     to_pascal_case,
     to_scalar_tensor_name,
-    to_variable_names,
 )
-from generate_op import add_fluid_name, process_invoke_op
+from generate_op import process_invoke_op
 from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from parse_utils import to_named_dict
 from tests import (
     is_base_op,
     is_composite_op,
     is_initializer_list,
     is_scalar,
     is_vec,

@@ -62,9 +60,7 @@ env.filters["to_input_name"] = to_input_name
 env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
 env.filters["cartesian_prod_mapping"] = cartesian_prod_mapping
 env.filters["to_composite_grad_opmaker_name"] = to_composite_grad_opmaker_name
-env.filters["to_variable_names"] = to_variable_names
 env.tests["base_op"] = is_base_op
 env.tests["composite_op"] = is_composite_op
 env.tests["vec"] = is_vec
 env.tests["scalar"] = is_scalar
 env.tests["initializer_list"] = is_initializer_list

@@ -100,18 +96,9 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path):
         op['name'] = op['op_name']
         if op["backward"] is not None:
             op["backward"] = SPARSE_OP_PREFIX + op["backward"]
-        add_fluid_name(op["inputs"])
-        add_fluid_name(op["attrs"])
-        add_fluid_name(op["outputs"])
     for bw_op in backward_ops:
         bw_op['op_name'] = SPARSE_OP_PREFIX + bw_op['name']
         bw_op['name'] = bw_op['op_name']
-        add_fluid_name(bw_op["inputs"])
-        add_fluid_name(bw_op["attrs"])
-        add_fluid_name(bw_op["outputs"])
-        add_fluid_name(bw_op["forward"]["inputs"])
-        add_fluid_name(bw_op["forward"]["attrs"])
-        add_fluid_name(bw_op["forward"]["outputs"])
         if 'invoke' in bw_op:
             bw_op['invoke']['args'] = [
                 param.strip() for param in bw_op['invoke']['args'].split(',')

@@ -152,6 +139,7 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path):
         ops=ops,
         backward_ops=backward_ops,
         op_dict=op_dict,
+        composite_gen_flag=False,
     )
     f.write(msg)
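Note: the sparse generator keeps prefixing op names so generated sparse kernels do not collide with dense ones. A standalone sketch of the renaming loop above (the constant and the record are stand-ins for the module-level ones):

    SPARSE_OP_PREFIX = "sparse_"  # stand-in for the script's constant

    backward_ops = [{"name": "tanh_grad"}]
    for bw_op in backward_ops:
        bw_op["op_name"] = SPARSE_OP_PREFIX + bw_op["name"]
        bw_op["name"] = bw_op["op_name"]

    print(backward_ops[0])
    # {'name': 'sparse_tanh_grad', 'op_name': 'sparse_tanh_grad'}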
paddle/fluid/operators/generator/generate_static_op.py

@@ -28,14 +28,12 @@ from filters import (
     to_opmaker_name_cstr,
     to_pascal_case,
     to_scalar_tensor_name,
-    to_variable_names,
 )
-from generate_op import add_compat_name, add_fluid_name
+from generate_op import replace_compat_name
 from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from parse_utils import to_named_dict
 from tests import (
     is_base_op,
     is_composite_op,
     is_initializer_list,
     is_scalar,
     is_vec,

@@ -62,9 +60,7 @@ env.filters["to_input_name"] = to_input_name
 env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
 env.filters["cartesian_prod_mapping"] = cartesian_prod_mapping
 env.filters["to_composite_grad_opmaker_name"] = to_composite_grad_opmaker_name
-env.filters["to_variable_names"] = to_variable_names
 env.tests["base_op"] = is_base_op
 env.tests["composite_op"] = is_composite_op
 env.tests["vec"] = is_vec
 env.tests["scalar"] = is_scalar
 env.tests["initializer_list"] = is_initializer_list

@@ -104,11 +100,8 @@ def main(
     for op in ops:
         op['op_name'] = op['name']
-        add_fluid_name(op["inputs"])
-        add_fluid_name(op["attrs"])
-        add_fluid_name(op["outputs"])
-    add_compat_name(op_op_map, forward_op_dict, {})
+    replace_compat_name(op_op_map, forward_op_dict, {})

     if len(ops) == 0:
         if os.path.isfile(output_op_path):

@@ -123,6 +116,7 @@ def main(
         ops=ops,
         backward_ops=[],
         op_dict=forward_op_dict,
+        composite_gen_flag=False,
     )
     f.write(msg)
paddle/fluid/operators/generator/parse_utils.py

@@ -294,13 +294,14 @@ def parse_composite(
     composite_config: str,
 ) -> Dict[str, Any]:
     # composite_config: func(args1, args2,.....)
-    result = re.search(
-        r"(?P<func_name>[a-z][a-z0-9_]+)\s*\((?P<func_args>[^\)]+)\)",
-        composite_config,
-    )
-    func_name = result.group("func_name")
-    func_args = result.group("func_args")
+    fname = r'(.*?)'
+    wspace = r'\s*'
+    fargs = r'(.*?)'
+    pattern = fr'{fname}{wspace}\({wspace}{fargs}{wspace}\)'
+    m = re.search(pattern, composite_config)
+    func_name = m.group(1)
+    func_args = m.group(2)

     composite_dict = {}
     composite_dict["func_name"] = func_name
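Note: both patterns accept the func(arg, ...) spelling of a composite entry; the restored one is assembled from lazy fragments and also strips whitespace inside the parentheses. A runnable comparison on an invented config string:

    import re

    config = "tanh_grad(out, out_grad, x_grad)"

    # pattern deleted by this revert
    removed = re.search(
        r"(?P<func_name>[a-z][a-z0-9_]+)\s*\((?P<func_args>[^\)]+)\)", config
    )

    # pattern restored by this revert
    fname, wspace, fargs = r'(.*?)', r'\s*', r'(.*?)'
    restored = re.search(fr'{fname}{wspace}\({wspace}{fargs}{wspace}\)', config)

    print(removed.group("func_name"), "|", removed.group("func_args"))
    print(restored.group(1), "|", restored.group(2))
    # both print: tanh_grad | out, out_grad, x_grad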
paddle/fluid/operators/generator/templates/op.c.j2

@@ -39,9 +39,11 @@ using paddle::framework::GradVarName;
 {% else %}
 {{backward_op_reused_maker(op, op_dict[op["forward"]["name"]], op["invoke"])}}
 {% endif %}
-{% if op is composite_op %}
+{% if composite_gen_flag == True %}
+    {% if op is composite_op %}
 {{composite_grad_op_maker(op_dict[op["name"]])}}
+    {% endif %}
 {% endif %}
 {% endfor %}
 }  // namespace operators
 }  // namespace paddle

@@ -49,7 +51,7 @@ using paddle::framework::GradVarName;
 namespace ops = paddle::operators;
 {% for op in ops + backward_ops %}
 {% if op is base_op %}
-{{register_op_with_components(op)}}
+{{register_op_with_components(op, op_dict)}}
 {{register_op_version(op)}}
 {% endif %}
 {% endfor %}
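Note: composite_gen_flag lets callers that pass composite_gen_flag=False (the sparse and static generators above) render op.c.j2 without emitting composite grad makers. A minimal sketch of the gate's behavior; the template string and data are invented, not the real op.c.j2:

    from jinja2 import Environment, StrictUndefined

    env = Environment(undefined=StrictUndefined)
    env.tests["composite_op"] = lambda op: "composite" in op  # stand-in test

    tmpl = env.from_string(
        "{% if composite_gen_flag == True %}"
        "{% if op is composite_op %}maker for {{ op['name'] }}{% endif %}"
        "{% endif %}"
    )

    op = {"name": "tanh_grad", "composite": {}}
    print(tmpl.render(op=op, composite_gen_flag=True))   # maker for tanh_grad
    print(tmpl.render(op=op, composite_gen_flag=False))  # (empty)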
paddle/fluid/operators/generator/templates/operator_utils.c.j2

@@ -12,7 +12,7 @@ class {{op_name | to_pascal_case}}OpMaker : public framework::OpProtoAndCheckerM
 {{add_output(loop.index0, output, op_name)}};
     {% endfor %}
     {% for attr in op["attrs"] %}
-    {% if attr["fluid_name"] in op["kernel"]["param"] %}
+    {% if attr["name"] in op["kernel"]["param"] %}
 {{add_attr(loop.index0, attr, op_name)}};
     {% endif %}
     {% endfor %}

@@ -27,7 +27,7 @@ TODO: Documentation of {{op_name}} op.
 {# add input, it could be duplicable or dispensable #}
 {% macro add_input(i, input, op_name) %}{# inline #}
-{% set name = input["fluid_name"] %}
+{% set name = input["name"] %}
 {% set typename = input["typename"] %}
 AddInput({{name| to_opmaker_name}}, "({{typename}}), input {{i}} of {{op_name}} op.")
 {%- if typename is vec %}

@@ -42,7 +42,7 @@ AddInput({{name| to_opmaker_name}}, "({{typename}}), input {{i}} of {{op_name}}
 {# add output, it could be duplicable or intermediate, however, optional output is not supported #}
 {% macro add_output(i, output, op_name) %}{# inline #}
-{% set name = output["fluid_name"] %}
+{% set name = output["name"] %}
 {% set typename = output["typename"] %}
 {% set is_intermediate = output["intermediate"] %}
 AddOutput({{name | to_opmaker_name}}, "({{typename}}), output {{i}} of {{op_name}} op.")

@@ -66,7 +66,7 @@ AddOutput({{name | to_opmaker_name}}, "({{typename}}), output {{i}} of {{op_name
 {# add attribute, and process default value if needed #}
 {% macro add_attr(i, attr, op_name) %}{# inline #}
-{% set name = attr["fluid_name"] %}
+{% set name = attr["name"] %}
 {% set typename = attr["typename"] %}
 {% if typename is scalar %}
 AddInput("{{attr | to_scalar_tensor_name}}", "attribute {{i}} for {{op_name}} op from 0D Tensor.")

@@ -153,15 +153,15 @@ All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgu
 {% set kernel_in_type_list = kernel_config["dispatch"][kernel_func][0] %}
 if ( {%- for input in inputs %}
-  {%- if input["fluid_name"] in kernel_config["param"] %}
+  {%- if input["name"] in kernel_config["param"] %}
   {%- if kernel_in_type_list[input_idx.idx] == "dense" %}
-ctx.IsDenseTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
+ctx.IsDenseTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
   {%- elif kernel_in_type_list[input_idx.idx] == "selected_rows" %}
-ctx.IsSelectedRowsInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
+ctx.IsSelectedRowsInput("{{input["name"]}}"){{" && " if not loop.last}}
   {%- elif kernel_in_type_list[input_idx.idx] == "sparse_coo" %}
-ctx.IsSparseCooTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
+ctx.IsSparseCooTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
   {%- elif kernel_in_type_list[input_idx.idx] == "sparse_csr" %}
-ctx.IsSparseCsrTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
+ctx.IsSparseCsrTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
   {%- endif %}
 {% set input_idx.idx = input_idx.idx + 1 %}
 {%- endif %}

@@ -210,8 +210,8 @@ PD_REGISTER_ARG_MAPPING_FN({{op["op_name"]}}, phi::{{op["op_name"] | to_pascal_c
 {% macro get_input_list(inputs, kernel_args) %}{# inline #}
 paddle::small_vector<const char*> inputs {
 {%- for input in inputs %}
-  {%- if input["fluid_name"] in kernel_args %}
-  {{input["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
+  {%- if input["name"] in kernel_args %}
+  {{input["name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
   {%- endif %}
 {%- endfor %}
 }

@@ -219,8 +219,8 @@ paddle::small_vector<const char*> inputs {
 {% macro get_an_attr(attr, kernel_args) %}{# inline #}
 {% set typename = attr["typename"] %}
-{%- if attr["fluid_name"] in kernel_args %}
-{% set name = attr["fluid_name"] %}
+{%- if attr["name"] in kernel_args %}
+{% set name = attr["name"] %}
 {% if typename is scalar %}{# scalar correspond to a dispensable input and an attr in opmaker #}
 attrs.emplace_back(ctx.HasInput("{{attr | to_scalar_tensor_name}}") ? "{{attr | to_scalar_tensor_name}}" : "{{name}}");
 {%- elif typename == "IntArray" %}

@@ -251,7 +251,7 @@ attrs.emplace_back("{{name}}");
 {% macro get_output_list(outputs, kernel_args) %}{# inline #}
 paddle::small_vector<const char*> outputs {
 {%- for output in outputs %}
-  {{output["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
+  {{output["name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
 {%- endfor %}
 }
 {%- endmacro %}

@@ -263,7 +263,7 @@ phi::KernelKey GetExpectedKernelType(
 {%if kernel["data_type"] is not none %}{# data type ---------------------------------#}
 {% if kernel["data_type"]["candidates"] | length == 1 %}
 {% set data_type_arg = kernel["data_type"]["candidates"][0] %}
-{% set inputs = op["inputs"] | map(attribute="fluid_name") | list %}
+{% set inputs = op["inputs"] | map(attribute="name") | list %}
 {% if data_type_arg in inputs %}
 auto data_type = framework::OperatorWithKernel::IndicateVarDataType(ctx, {{data_type_arg | to_opmaker_name}});
 {% if kernel["data_type"]["to_complex_flag"][0] %}

@@ -319,8 +319,9 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER({{op["op_name"] | to_pascal_case}}NoNeedBuff
 {% endif %}
 {% endmacro%}

-{% macro register_op_with_components(op) %}
+{% macro register_op_with_components(op, op_dict) %}
 {% set name = op["op_name"] %}
+{% set phi_name = op["name"] %}
 REGISTER_OPERATOR({{name}}, ops::{{name | to_pascal_case}}Op,
 {% if not "forward" in op %}{# it is a forward op #}
 ops::{{name | to_pascal_case}}OpMaker,

@@ -336,8 +337,8 @@ REGISTER_OPERATOR({{name}}, ops::{{name | to_pascal_case}}Op,
 {% if op is supports_inplace %}{# inplace#}
 ops::{{name | to_pascal_case}}InplaceInferer,
 {% endif %}
-{% if "backward_composite" in op and op["backward_composite"] is not none %}
-ops::{{op["backward_composite"] | to_composite_grad_opmaker_name}},
+{% if "phi_backward" in op and op["phi_backward"] is not none and "composite" in op_dict[op["phi_backward"]] %}
+ops::{{op["phi_backward"] | to_composite_grad_opmaker_name}},
 {% endif %}
 {% if op is supports_no_need_buffer %}{# no_need_buffer #}
 ops::{{name | to_pascal_case}}NoNeedBufferVarInferer,

@@ -390,12 +391,12 @@ REGISTER_OP_VERSION({{name}})
 {# --------------------------------------- backward op maker ---------------------------------------------- #}
 {% macro backward_op_maker(op, forward_op ) %}
   {% set name = op["op_name"] %}
-  {% set forward_input_names = op["forward"]["inputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_output_names = op["forward"]["outputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_attr_names = op["forward"]["attrs"] | map(attribute="fluid_name") | list %}
-  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %}
+  {% set forward_input_names = op["forward"]["inputs"] | map(attribute="name") | list %}
+  {% set forward_output_names = op["forward"]["outputs"] | map(attribute="name") | list %}
+  {% set forward_attr_names = op["forward"]["attrs"] | map(attribute="name") | list %}
+  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="name") | list %}
+  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="name") | list %}
+  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="name") | list %}
 template <typename T>
 class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
  public:

@@ -406,8 +407,8 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
     grad_op->SetType("{{name}}");
     {% for input in op["inputs"] %}
-    grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
-      input["fluid_name"],
+    grad_op->SetInput({{input["name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
+      input["name"],
       forward_input_names,
       forward_output_names,
       forward_input_orig_names,

@@ -415,8 +416,8 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
     {% endfor %}
     {% for output in op["outputs"] %}
-    grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
-      output["fluid_name"],
+    grad_op->SetOutput({{output["name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
+      output["name"],
       forward_input_names,
       forward_output_names,
       forward_input_orig_names,

@@ -426,7 +427,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
     grad_op->SetAttrMap(this->Attrs());
     {% for attr in op["attrs"] %}
-    {% set attr_name = attr["fluid_name"] %}
+    {% set attr_name = attr["name"] %}
     {% if attr_name in forward_attr_names %}
     {% if attr["typename"] == "IntArray" %}
     {% if 'tensor_name' in attr or 'manual_flag' not in attr %}

@@ -454,12 +455,12 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
 {% macro backward_op_reused_maker(bw_op, forward_op, invoke_op) %}
   {% set name = bw_op["op_name"] %}
-  {% set forward_input_names = bw_op["forward"]["inputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_output_names = bw_op["forward"]["outputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_attr_names = bw_op["forward"]["attrs"] | map(attribute="fluid_name") | list %}
-  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %}
-  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %}
+  {% set forward_input_names = bw_op["forward"]["inputs"] | map(attribute="name") | list %}
+  {% set forward_output_names = bw_op["forward"]["outputs"] | map(attribute="name") | list %}
+  {% set forward_attr_names = bw_op["forward"]["attrs"] | map(attribute="name") | list %}
+  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="name") | list %}
+  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="name") | list %}
+  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="name") | list %}
 template <typename T>
 class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
  public:

@@ -470,7 +471,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
     grad_op->SetType("{{invoke_op["func"]}}");
     {% for input in invoke_op["inputs"] %}
-    grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
+    grad_op->SetInput({{input["name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
       input["value"],
       forward_input_names,
       forward_output_names,

@@ -479,7 +480,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
     {% endfor %}
     {% for output in invoke_op["outputs"] %}
-    grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
+    grad_op->SetOutput({{output["name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
       output["value"],
       forward_input_names,
       forward_output_names,

@@ -489,49 +490,42 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
     {% endfor %}
     {% for attr in invoke_op["attrs"] %}
-    grad_op->SetAttr("{{attr["fluid_name"]}}", {{attr["value"]}});
+    grad_op->SetAttr("{{attr["name"]}}", {{attr["value"]}});
     {% endfor %}
   }
 };
 {% endmacro %}

-{% macro composite_grad_op_maker(backward_op) %}
-  {% set op_name = backward_op["op_name"] %}
-  {% set inputs = backward_op["inputs"] | to_variable_names("name")%}
-  {% set input_dict = backward_op["input_dict"] %}
-  {% set fluid_inputs = backward_op["inputs"] | to_variable_names("fluid_name")%}
-  {% set forward_fluid_inputs = backward_op["forward"]["inputs"] | to_variable_names("fluid_name")%}
-  {% set forward_fluid_outputs = backward_op["forward"]["outputs"] | to_variable_names("fluid_name")%}
-  {% set attrs = backward_op["attrs"] | to_variable_names("name") %}
-  {% set fluid_attrs = backward_op["attrs"] | to_variable_names("fluid_name") %}
-  {% set attr_dict = backward_op["attr_dict"] %}
-  {% set outputs = backward_op["outputs"] | to_variable_names("name")%}
-  {% set output_dict = backward_op["output_dict"] %}
-  {% set fluid_outputs = backward_op["outputs"] | to_variable_names("fluid_name")%}
-  {% set composite_func_info = backward_op["composite"] %}
-class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradOpMakerBase {
+{% macro composite_grad_op_maker(composite_op_dict) %}
+  {% set op_name = composite_op_dict["name"] %}
+class {{op_name | to_composite_grad_opmaker_name}} : public prim::GradCompositeOpMakerBase {
 public:
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
   void Apply() override {
     //get inputs
-{{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict)}}
+{{construct_composite_input(composite_op_dict)}}
     //get attr
-{{construct_composite_attr(attrs, fluid_attrs, attr_dict)}}
+{{construct_composite_attr(composite_op_dict)}}
     //get output
-{{construct_composite_output(outputs, fluid_outputs, output_dict)}}
+{{construct_composite_output(composite_op_dict)}}
     //get output ptr
-{{construct_composite_output_ptr(outputs, output_dict)}}
+{{construct_composite_output_ptr(composite_op_dict)}}
    //get output orginal name
-{{get_composite_output_orginal_name(outputs, output_dict)}}
+{{get_composite_output_orginal_name(composite_op_dict)}}
    //call composite backward func
-{{call_composite_backward_api(composite_func_info)}}
+{{call_composite_backward_api(composite_op_dict)}}
    //recover output name
-{{recover_composite_output_name(outputs)}}
+{{recover_composite_output_name(composite_op_dict)}}
   }
 };
 {%- endmacro %}

-{% macro construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict) %}
+{% macro construct_composite_input(composite_op_dict) %}
+  {% set inputs = composite_op_dict["composite"]["phi_inputs"] %}
+  {% set input_dict = composite_op_dict["input_dict"] %}
+  {% set fluid_inputs = composite_op_dict["composite"]["fluid_inputs"] %}
+  {% set forward_fluid_inputs = composite_op_dict["forward"]["inputs"] | map(attribute="name") | list %}
+  {% set forward_fluid_outputs = composite_op_dict["forward"]["outputs"] | map(attribute="name") | list %}
   {% set inputs_length = inputs | length %}
   {% for i in range(inputs_length) %}
     {% set input_typename = input_dict[inputs[i]]["typename"] %}

@@ -540,13 +534,13 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
   {% if input_typename == "Tensor" %}
     {% if input_optional_flag == True %}
   paddle::optional<paddle::experimental::Tensor> {{inputs[i]}} = this->GetOptionalSingleForwardInput("{{fluid_inputs[i]}}");
-    {% else %}
+    {% elif input_optional_flag == False %}
   paddle::experimental::Tensor {{inputs[i]}} = this->GetSingleForwardInput("{{fluid_inputs[i]}}");
     {% endif %}
   {% elif input_typename == "Tensor[]" %}
     {% if input_optional_flag == True %}
   std::vector<paddle::optional<paddle::experimental::Tensor>> {{inputs[i]}} = this->GetOptionalMultiForwardInput("{{fluid_inputs[i]}}");
-    {% else %}
+    {% elif input_optional_flag == False %}
   std::vector<paddle::experimental::Tensor> {{inputs[i]}} = this->GetMultiForwardInput("{{fluid_inputs[i]}}");
     {% endif %}
   {% endif %}

@@ -554,13 +548,13 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
   {% if input_typename == "Tensor" %}
     {% if input_optional_flag == True %}
   paddle::optional<paddle::experimental::Tensor> {{inputs[i]}} = this->GetOptionalSingleForwardOutput("{{fluid_inputs[i]}}");
-    {% else %}
+    {% elif input_optional_flag == False %}
   paddle::experimental::Tensor {{inputs[i]}} = this->GetSingleForwardOutput("{{fluid_inputs[i]}}");
     {% endif %}
   {% elif input_typename == "Tensor[]" %}
     {% if input_optional_flag == True %}
   std::vector<paddle::optional<paddle::experimental::Tensor>> {{inputs[i]}} = this->GetOptionalMultiForwardOutput("{{fluid_inputs[i]}}");
-    {% else %}
+    {% elif input_optional_flag == False %}
   std::vector<paddle::experimental::Tensor> {{inputs[i]}} = this->GetMultiForwardOutput("{{fluid_inputs[i]}}");
     {% endif %}
   {% endif %}

@@ -568,13 +562,13 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
   {% if input_typename == "Tensor" %}
     {% if input_optional_flag == True %}
   paddle::optional<paddle::experimental::Tensor> {{inputs[i]}} = this->GetOptionalSingleOutputGrad("{{fluid_inputs[i][:-5]}}");
-    {% else %}
+    {% elif input_optional_flag == False %}
   paddle::experimental::Tensor {{inputs[i]}} = this->GetSingleOutputGrad("{{fluid_inputs[i][:-5]}}");
     {% endif %}
   {% elif input_typename == "Tensor[]" %}
     {% if input_optional_flag == True %}
   std::vector<paddle::optional<paddle::experimental::Tensor>> {{inputs[i]}} = this->GetOptionalMultiOutputGrad("{{fluid_inputs[i][:-5]}}");
-    {% else %}
+    {% elif input_optional_flag == False %}
   std::vector<paddle::experimental::Tensor> {{inputs[i]}} = this->GetMultiOutputGrad("{{fluid_inputs[i][:-5]}}");
     {%- endif %}
   {%- endif %}

@@ -582,18 +576,24 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
 {%- endfor %}
 {%- endmacro %}

-{% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %}
+{% macro construct_composite_attr(composite_op_dict) %}
+  {% set attrs = composite_op_dict["composite"]["phi_attrs"] %}
+  {% set fluid_attrs = composite_op_dict["composite"]["fluid_attrs"] %}
+  {% set fluid_attrs_dict = composite_op_dict["attr_dict"] %}
   {% set attrs_length = attrs | length %}
   {% for i in range(attrs_length) %}
-    {% set attrs_data_type = attr_dict[attrs[i]]["typename"] | to_op_attr_type %}
-const {{attrs_data_type}} {{attrs[i]}} = this->Attr<{{attrs_data_type}}>("{{fluid_attrs[i]}}");
+    {% set attrs_data_type = fluid_attrs_dict[fluid_attrs[i]]["typename"] | to_op_attr_type %}
+{{attrs_data_type}} {{attrs[i]}} = this->Attr<{{attrs_data_type}}>("{{fluid_attrs[i]}}");
   {% endfor %}
 {%- endmacro %}

-{% macro construct_composite_output(outputs, fluid_outputs, output_dict) %}
+{% macro construct_composite_output(composite_op_dict) %}
+  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
+  {% set fluid_outputs = composite_op_dict["composite"]["fluid_outputs"] %}
+  {% set outputs_dict = composite_op_dict["output_dict"] %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
-    {% set output_typename = output_dict[outputs[i]]["typename"] %}
+    {% set output_typename = outputs_dict[outputs[i]]["typename"] %}
   {% if output_typename == "Tensor" %}
 paddle::experimental::Tensor {{outputs[i] + "_t"}} = this->GetSingleInputGrad("{{fluid_outputs[i][:-5]}}");
   {% elif output_typename == "Tensor[]" %}

@@ -602,10 +602,12 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
 {%- endfor %}
 {%- endmacro %}

-{% macro construct_composite_output_ptr(outputs, output_dict) %}
+{% macro construct_composite_output_ptr(composite_op_dict) %}
+  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
+  {% set outputs_dict = composite_op_dict["output_dict"] %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
-    {% set output_typename = output_dict[outputs[i]]["typename"] %}
+    {% set output_typename = outputs_dict[outputs[i]]["typename"] %}
   {% if output_typename == "Tensor" %}
 paddle::experimental::Tensor* {{outputs[i]}} = this->GetOutputPtr(&{{outputs[i]+ "_t"}});
   {% elif output_typename == "Tensor[]" %}

@@ -618,10 +620,12 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
 {%- endfor %}
 {%- endmacro %}

-{% macro get_composite_output_orginal_name(outputs, output_dict) %}
+{% macro get_composite_output_orginal_name(composite_op_dict) %}
+  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
+  {% set outputs_dict = composite_op_dict["output_dict"] %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
-    {% set output_typename = output_dict[outputs[i]]["typename"] %}
+    {% set output_typename = outputs_dict[outputs[i]]["typename"] %}
   {% if output_typename == "Tensor" %}
 std::string {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
   {% elif output_typename == "Tensor[]" %}

@@ -630,12 +634,13 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
 {%- endfor %}
 {%- endmacro %}

-{% macro call_composite_backward_api(composite_func_info) %}
-VLOG(3) << "Runing {{composite_func_info["func_name"]}} composite func";
-prim::{{composite_func_info["func_name"]}}<prim::DescTensor>({{composite_func_info["func_args"]}});
+{% macro call_composite_backward_api(composite_op_dict) %}
+VLOG(3) << "Runing {{composite_op_dict["composite"]["func_name"]}} composite func";
+prim::{{composite_op_dict["composite"]["func_name"]}}<prim::DescTensor>({{composite_op_dict["composite"]["func_args"]}});
 {%- endmacro %}

-{% macro recover_composite_output_name(outputs) %}
+{% macro recover_composite_output_name(composite_op_dict) %}
+  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
 this->RecoverOutputName({{outputs[i] + "_t"}}, {{outputs[i] + "_name"}});
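Note: throughout these macros the revert swaps the deleted to_variable_names filter for Jinja's built-in map(attribute=...) | list idiom. A self-contained demonstration of that idiom (the op record is invented):

    from jinja2 import Environment

    tmpl = Environment().from_string(
        '{% set names = op["forward"]["inputs"] | map(attribute="name") | list %}'
        "{{ names | join(', ') }}"
    )
    op = {"forward": {"inputs": [{"name": "x"}, {"name": "y"}]}}
    print(tmpl.render(op=op))  # x, y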
paddle/fluid/prim/tests/test_static_prim.cc

@@ -135,9 +135,9 @@ struct TestBaseProgram {
   int idx_{0};
 };

-class TestGradCompositeGradMaker : public CompositeGradOpMakerBase {
+class TestGradCompositeGradMaker : public GradCompositeOpMakerBase {
  public:
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
+  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
   void Apply() override {}
 };
paddle/fluid/prim/utils/static/composite_grad_desc_maker.h

@@ -41,9 +41,9 @@ namespace prim {
   argument DropEmptyIG in the derived classes.
 */

-class CompositeGradOpMakerBase {
+class GradCompositeOpMakerBase {
  public:
-  explicit CompositeGradOpMakerBase(
+  explicit GradCompositeOpMakerBase(
       const framework::OpDesc& fwd_op,
       const std::unordered_set<std::string>& no_grad_set,
       std::unordered_map<std::string, std::string>* grad_to_var,

@@ -61,7 +61,7 @@ class CompositeGradOpMakerBase {
         acting_program_.MutableBlock(0));
   }

-  virtual ~CompositeGradOpMakerBase() = default;
+  virtual ~GradCompositeOpMakerBase() = default;

   virtual std::vector<std::unique_ptr<framework::OpDesc>> operator()() {
     this->Apply();