diff --git a/paddle/fluid/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h
index 1edc84aba07d9753eb543db4e038b842706de811..4b109ba0dcff22ee55722a026305a671c0783557 100644
--- a/paddle/fluid/framework/details/op_registry.h
+++ b/paddle/fluid/framework/details/op_registry.h
@@ -63,7 +63,7 @@ using OpRegistryClasses = std::tuple<  // NOLINT
     TypePair,  // NOLINT
     TypePair,  // NOLINT
     TypePair,  // NOLINT
-    TypePair,  // NOLINT
+    TypePair,  // NOLINT
     TypePair,  // NOLINT
     TypePair,  // NOLINT
     TypePair,  // NOLINT
@@ -262,7 +262,7 @@ struct OpInfoFiller {
         info->grad_comp_op_maker_,
         nullptr,
         platform::errors::AlreadyExists(
-            "GradCompositeOpMakerBase of %s has been registered", op_type));
+            "CompositeGradOpMakerBase of %s has been registered", op_type));
 
     info->grad_comp_op_maker_ =
         [](const OpDesc& fwd_op,
diff --git a/paddle/fluid/framework/op_info.h b/paddle/fluid/framework/op_info.h
index 61a2373eb3479a06201443120ac4bca0ded498cd..bd4405f722844426a5e75b28a28898995cf348c0 100644
--- a/paddle/fluid/framework/op_info.h
+++ b/paddle/fluid/framework/op_info.h
@@ -43,7 +43,7 @@ class OpInfo {
  public:
   OpCreator creator_;
   GradOpMakerFN grad_op_maker_;
-  GradCompositeOpMakerFN grad_comp_op_maker_;
+  CompositeGradOpMakerFN grad_comp_op_maker_;
   proto::OpProto* proto_{nullptr};
   OpAttrChecker* checker_{nullptr};
   InferVarTypeFN infer_var_type_;
@@ -84,7 +84,7 @@ class OpInfo {
 
   const GradOpMakerFN& GradOpMaker() const { return grad_op_maker_; }
 
-  const GradCompositeOpMakerFN& GradCompOpMaker() const {
+  const CompositeGradOpMakerFN& CompGradOpMaker() const {
     return grad_comp_op_maker_;
   }
diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h
index 7c90925da4a885d40a91c09b78f165996a85fa83..13bd782ce403347c858333e2702a6360e62b697d 100644
--- a/paddle/fluid/framework/type_defs.h
+++ b/paddle/fluid/framework/type_defs.h
@@ -96,7 +96,7 @@ using GradOpMakerFN = std::function<std::vector<std::unique_ptr<OpDesc>>(
     std::unordered_map<std::string, std::string>* /*grad_to_var*/,
     const std::vector<BlockDesc*>& grad_block)>;
 
-using GradCompositeOpMakerFN =
+using CompositeGradOpMakerFN =
     std::function<std::vector<std::unique_ptr<OpDesc>>(
         const OpDesc&,
         const std::unordered_set<std::string>& /*no_grad_set*/,
diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc
index 11e0fa7dd1f9705e259d30d76aba852c6a526011..48a5d2e433a100061c4d8a903ea045a21828cf84 100644
--- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc
@@ -51,9 +51,9 @@ class ElementwiseAddOpMaker : public ElementwiseOpMaker {
   }
 };
 
-class ElementwiseAddGradCompositeOpMaker
-    : public prim::GradCompositeOpMakerBase {
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+class ElementwiseAddCompositeGradOpMaker
+    : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
 
  public:
   void Apply() override {
@@ -122,7 +122,7 @@ REGISTER_OPERATOR(elementwise_add,
                   ::paddle::operators::ElementwiseOpInferVarType,
                   elementwise_addGradMaker<::paddle::framework::OpDesc>,
                   elementwise_addGradMaker<::paddle::imperative::OpBase>,
-                  ::paddle::operators::ElementwiseAddGradCompositeOpMaker,
+                  ::paddle::operators::ElementwiseAddCompositeGradOpMaker,
                   ::paddle::operators::ElementwiseOpInplaceInferer);
 
 namespace ops = paddle::operators;
diff --git a/paddle/fluid/operators/elementwise/elementwise_div_op.cc b/paddle/fluid/operators/elementwise/elementwise_div_op.cc
index 3d62792d8513eabd228694b5ff6268cded58ca09..41549ede1ebc6184b56db0026afe179b290e4281 100644
--- a/paddle/fluid/operators/elementwise/elementwise_div_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_div_op.cc
@@ -67,9 +67,9 @@ class ElementwiseDivGradOpMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
-class ElementwiseDivGradCompositeOpMaker
-    : public prim::GradCompositeOpMakerBase {
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+class ElementwiseDivCompositeGradOpMaker
+    : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
 
  public:
   void Apply() override {
@@ -123,7 +123,7 @@ REGISTER_OPERATOR(elementwise_div,
                   ops::ElementwiseOp,
                   ops::ElementwiseDivOpMaker,
                   ops::ElementwiseOpInferVarType,
-                  ops::ElementwiseDivGradCompositeOpMaker,
+                  ops::ElementwiseDivCompositeGradOpMaker,
                   ops::ElementwiseDivGradOpMaker<paddle::framework::OpDesc>,
                   ops::ElementwiseDivGradOpMaker<paddle::imperative::OpBase>);
diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc
index 4052f3e09e0cc2f29271bd0a9977340d39af4700..740c9381d92e233ceb2be3de156a9a62e1ac22f5 100644
--- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cc
@@ -66,9 +66,9 @@ class ElementwiseMulOpGradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
-class ElementwiseMulGradCompositeOpMaker
-    : public prim::GradCompositeOpMakerBase {
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+class ElementwiseMulCompositeGradOpMaker
+    : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
 
  public:
   void Apply() override {
@@ -155,7 +155,7 @@ REGISTER_OPERATOR(elementwise_mul,
                   ops::ElementwiseOpInferVarType,
                   ops::ElementwiseMulOpGradMaker<paddle::framework::OpDesc>,
                   ops::ElementwiseMulOpGradMaker<paddle::imperative::OpBase>,
-                  ops::ElementwiseMulGradCompositeOpMaker);
+                  ops::ElementwiseMulCompositeGradOpMaker);
 
 REGISTER_OPERATOR(
     elementwise_mul_grad,
     ops::ElementwiseOpGrad,
diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc
index be839f123a1e9fb2a015bae592763eab34443b7f..2a9e14867acf1f3caf105a6b31c69d31f073df39 100644
--- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc
+++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc
@@ -54,9 +54,9 @@ class ElementwiseSubOpMaker : public ElementwiseOpMaker {
   }
 };
 
-class ElementwiseSubGradCompositeOpMaker
-    : public prim::GradCompositeOpMakerBase {
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+class ElementwiseSubCompositeGradOpMaker
+    : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
 
  public:
   void Apply() override {
@@ -109,7 +109,7 @@ REGISTER_OPERATOR(elementwise_sub,
                   ::paddle::operators::ElementwiseOpInferVarType,
                   elementwise_subGradMaker<::paddle::framework::OpDesc>,
                   elementwise_subGradMaker<::paddle::imperative::OpBase>,
-                  ::paddle::operators::ElementwiseSubGradCompositeOpMaker,
+                  ::paddle::operators::ElementwiseSubCompositeGradOpMaker,
                   ::paddle::operators::ElementwiseOpInplaceInferer);
 
 REGISTER_OPERATOR(
diff --git a/paddle/fluid/operators/expand_v2_op.cc b/paddle/fluid/operators/expand_v2_op.cc
index 9a867c040fcb8ea3a238ce340ab2f5234a7424d1..3c05ab9295c6769bc7b949bc55bcd2321c063ba4 100644
--- a/paddle/fluid/operators/expand_v2_op.cc
+++ b/paddle/fluid/operators/expand_v2_op.cc
@@ -193,8 +193,8 @@ class ExpandV2GradOpMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
-class ExpandV2GradCompositeOpMaker : public prim::GradCompositeOpMakerBase {
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+class ExpandV2CompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
 
  public:
   void Apply() override {
@@ -244,7 +244,7 @@ namespace ops = paddle::operators;
 REGISTER_OPERATOR(expand_v2,
                   ops::ExpandV2Op,
                   ops::ExpandV2OpMaker,
-                  ops::ExpandV2GradCompositeOpMaker,
+                  ops::ExpandV2CompositeGradOpMaker,
                   ops::ExpandV2GradOpMaker<paddle::framework::OpDesc>,
                   ops::ExpandV2GradOpMaker<paddle::imperative::OpBase>,
                   ExpandInferShapeFunctor);
diff --git a/paddle/fluid/operators/generator/filters.py b/paddle/fluid/operators/generator/filters.py
index 8efbac1f7e92c4591c58df9d036a43e6eb0366ab..50bc1f7bca8845ba6a390234624872cff82c974f 100644
--- a/paddle/fluid/operators/generator/filters.py
+++ b/paddle/fluid/operators/generator/filters.py
@@ -14,6 +14,7 @@
 
 import itertools
 import re
+from typing import Dict, List
 
 from type_mapping import (
     attr_types_map,
@@ -137,17 +138,23 @@ def to_composite_grad_opmaker_name(backward_op_name):
     for i in range(len(words)):
         words[i] = words[i].strip()
         words[i] = words[i].capitalize()
-    composite_grad_opmaker_name = words[0] + "Composite"
-    composite_grad_opmaker_name += "".join(word for word in words[1:])
-    composite_grad_opmaker_name += "OpMaker"
+    composite_grad_opmaker_name = "".join(word for word in words[:-1])
+    composite_grad_opmaker_name += "CompositeGradOpMaker"
     return composite_grad_opmaker_name
 
 
+def to_variable_names(dict_list: List[Dict], key: str) -> List[str]:
+    names = []
+    for var in dict_list:
+        names.append(var[key])
+    return names
+
+
 def cartesian_prod_attrs(attrs):
     items = []
     for attr in attrs:
         type_name = attr["typename"]
-        name = attr["name"]
+        name = attr["fluid_name"]
         if type_name == "Scalar":
             items.append((name, to_scalar_tensor_name(attr)))
         elif type_name == "IntArray":
@@ -176,11 +183,15 @@ def cartesian_prod_mapping(op):
     kernels = op["kernel"]["func"]
     inputs = [
-        x["name"] for x in op["inputs"] if x["name"] in op["kernel"]["param"]
+        x["fluid_name"]
+        for x in op["inputs"]
+        if x["fluid_name"] in op["kernel"]["param"]
     ]
     inputs = [to_opmaker_name_cstr(input) for input in inputs]
     attrs = cartesian_prod_attrs(op["attrs"])
-    outputs = [to_opmaker_name_cstr(output["name"]) for output in op["outputs"]]
+    outputs = [
+        to_opmaker_name_cstr(output["fluid_name"]) for output in op["outputs"]
+    ]
 
     def vec(items):
         return "{" + ', '.join(items) + "}"
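The filter change above also changes the generated class names: the old code spliced `Composite` in after the first word of the backward op name, while the new code drops the trailing `grad` token and appends the full `CompositeGradOpMaker` suffix, matching the renamed C++ base class. A condensed re-implementation of the two filters for illustration (behavior inferred from the hunk, not the exact source):

```python
def to_composite_grad_opmaker_name(backward_op_name):
    words = [w.strip().capitalize() for w in backward_op_name.split("_")]
    # drop the trailing "Grad" word, then append the canonical suffix
    return "".join(words[:-1]) + "CompositeGradOpMaker"

def to_variable_names(dict_list, key):
    # project a list of input/attr/output dicts onto a single key
    return [var[key] for var in dict_list]

assert (
    to_composite_grad_opmaker_name("elementwise_add_grad")
    == "ElementwiseAddCompositeGradOpMaker"
)  # the old filter yielded "ElementwiseCompositeAddGradOpMaker"
assert to_variable_names([{"name": "x", "fluid_name": "X"}], "fluid_name") == ["X"]
```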
if "backward" in op: - op["phi_backward"] = op["backward"] - for backward_op in backward_ops: - if "backward" in backward_op: - backward_op["phi_backward"] = backward_op["backward"] - for backward_op_name, op_dict in backward_op_dict.items(): - if "composite" not in op_dict: - continue - op_dict["composite"]["phi_inputs"] = [] - op_dict["composite"]["phi_attrs"] = [] - op_dict["composite"]["phi_outputs"] = [] - for input in op_dict["inputs"]: - op_dict["composite"]["phi_inputs"].append(input['name']) - for attr in op_dict["attrs"]: - op_dict["composite"]["phi_attrs"].append(attr['name']) - for output in op_dict["outputs"]: - op_dict["composite"]["phi_outputs"].append(output['name']) - - -# replace name of op and params for OpMaker -def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict): +def add_composite_info(ops, backward_ops, backward_op_dict): + # add backward composite name in forward + for op in ops + backward_ops: + if ( + op["backward"] in backward_op_dict + and "composite" in backward_op_dict[op["backward"]] + ): + op["backward_composite"] = op["backward"] + else: + op["backward_composite"] = None + + +# add fluid name in ops and backward ops info +def add_fluid_name(dict_list): + for item in dict_list: + item["fluid_name"] = item["name"] + + +# add fluid name of op and params for OpMaker +def add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict): def get_phi_and_fluid_op_name(op_item): names = op_item.split('(') if len(names) == 1: @@ -187,12 +186,14 @@ def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict): else: return names[0].strip(), names[1].split(')')[0].strip() - def update_op_param_name(op_args, args_alias_map): + def add_op_param_name(op_args, args_alias_map): for item in op_args: if item['name'] in args_alias_map: - item['name'] = args_alias_map[item['name']] + item['fluid_name'] = args_alias_map[item['name']] + else: + item['fluid_name'] = item['name'] - def update_grad_args_name(op_args, args_alias_map): + def add_grad_args_name(op_args, args_alias_map): for item in op_args: if ( item['name'].endswith('_grad') @@ -201,38 +202,12 @@ def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict): args_alias_map[item['name']] = ( args_alias_map[item['name'][:-5]] + '_grad' ) - item['name'] = args_alias_map[item['name'][:-5]] + '_grad' - - def add_fluid_info_in_composite(composite_map, args_alias_map): - fluid_input_list = [] - fluid_attr_list = [] - fluid_output_list = [] - # add fluid op inputs - for input in composite_map["phi_inputs"]: - if input in args_alias_map: - fluid_input_list.append(args_alias_map[input]) - else: - fluid_input_list.append(input) - # add fluid op attrs - for attr in composite_map["phi_attrs"]: - if attr in args_alias_map: - fluid_attr_list.append(args_alias_map[attr]) - else: - fluid_attr_list.append(attr) - # add fluid op outputs - for output in composite_map["phi_outputs"]: - if output in args_alias_map: - fluid_output_list.append(args_alias_map[output]) - else: - fluid_output_list.append(output) - - composite_map.update( - { - "fluid_inputs": fluid_input_list, - "fluid_attrs": fluid_attr_list, - "fluid_outputs": fluid_output_list, - } - ) + item['fluid_name'] = args_alias_map[item['name'][:-5]] + '_grad' + elif ( + item['name'].endswith('_grad') + and item['name'][:-5] not in args_alias_map + ): + item['fluid_name'] = item['name'] def get_param_list_alias(param_list, args_map): return [ @@ -297,15 +272,15 @@ def replace_compat_name(op_fluid_map_list, 
             op_item['kernel']['layout']['candidates'], args_name_map
         )
 
-    def update_grad_op_compat_name(grad_op_item, args_name_map):
-        update_op_param_name(grad_op_item['inputs'], args_name_map)
-        update_op_param_name(grad_op_item['outputs'], args_name_map)
-        update_op_param_name(grad_op_item['attrs'], args_name_map)
-        update_op_param_name(grad_op_item['forward']['inputs'], args_name_map)
-        update_op_param_name(grad_op_item['forward']['outputs'], args_name_map)
-        update_op_param_name(grad_op_item['forward']['attrs'], args_name_map)
-        update_grad_args_name(grad_op_item['inputs'], args_map)
-        update_grad_args_name(grad_op_item['outputs'], args_map)
+    def add_grad_op_compat_name(grad_op_item, args_name_map):
+        add_op_param_name(grad_op_item['inputs'], args_name_map)
+        add_op_param_name(grad_op_item['outputs'], args_name_map)
+        add_op_param_name(grad_op_item['attrs'], args_name_map)
+        add_op_param_name(grad_op_item['forward']['inputs'], args_name_map)
+        add_op_param_name(grad_op_item['forward']['outputs'], args_name_map)
+        add_op_param_name(grad_op_item['forward']['attrs'], args_name_map)
+        add_grad_args_name(grad_op_item['inputs'], args_map)
+        add_grad_args_name(grad_op_item['outputs'], args_map)
 
     for op_args in op_fluid_map_list:
         new_op_name, op_name = get_phi_and_fluid_op_name(op_args['op'])
@@ -350,39 +325,32 @@ def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                         int_array_configs[
                             op_args[key][args_item['name']]
                         ] = int_array_configs[args_item['name']]
-                    args_item['name'] = op_args[key][args_item['name']]
-            if has_backward:
-                for args_item in backward_op_item['forward'][key]:
-                    if args_item['name'] in op_args[key]:
-                        args_item['name'] = op_args[key][args_item['name']]
-        forward_op_item["attr_dict"] = to_named_dict(forward_op_item["attrs"])
+                    args_item['fluid_name'] = op_args[key][
+                        args_item['name']
+                    ]
         update_common_params_name(
             forward_op_item, args_map, scalar_configs, int_array_configs
         )
 
         if has_backward:
-            update_grad_op_compat_name(backward_op_item, args_map)
+            # update fluid info in backward
+            add_grad_op_compat_name(backward_op_item, args_map)
             update_common_params_name(
                 backward_op_item, args_map, scalar_configs, int_array_configs
             )
-            backward_op_item["attr_dict"] = to_named_dict(
-                backward_op_item["attrs"]
-            )
 
         if 'backward' not in op_args:
             continue
 
         backward_op_list = op_args['backward'].split(',')
-        # add fluid args name in composite map
-        for backward_op in backward_op_list:
-            if (
-                "composite"
-                in backward_op_dict[backward_op.split('(')[0].strip()]
-            ):
-                add_fluid_info_in_composite(
-                    backward_op_dict[backward_op]["composite"], args_map
-                )
-        _, bw_op_name = get_phi_and_fluid_op_name(backward_op_list[0])
+        phi_bw_op_name, bw_op_name = get_phi_and_fluid_op_name(
+            backward_op_list[0]
+        )
+        if (
+            forward_op_item["backward_composite"] is not None
+            and phi_bw_op_name != bw_op_name
+        ):
+            forward_op_item["backward_composite"] = bw_op_name
         forward_op_item['backward'] = bw_op_name
         backward_op_item['op_name'] = bw_op_name
 
@@ -393,18 +361,20 @@ def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                 double_grad_op_name,
             ) = get_phi_and_fluid_op_name(backward_op_list[1])
             double_grad_item = backward_op_dict[phi_double_grad_op_name]
+            if (
+                backward_op_item["backward_composite"] is not None
+                and phi_double_grad_op_name != double_grad_op_name
+            ):
+                backward_op_item["backward_composite"] = double_grad_op_name
             backward_op_item['backward'] = double_grad_op_name
             double_grad_item['op_name'] = double_grad_op_name
-            update_grad_op_compat_name(double_grad_item, args_map)
+            add_grad_op_compat_name(double_grad_item, args_map)
             update_common_params_name(
                 double_grad_item,
                 args_map,
                 scalar_configs,
                 int_array_configs,
             )
-            double_grad_item["attr_dict"] = to_named_dict(
-                double_grad_item["attrs"]
-            )
 
             # for triple grad
             if len(backward_op_list) > 2:
@@ -413,18 +383,22 @@ def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
                     triple_grad_op_name,
                 ) = get_phi_and_fluid_op_name(backward_op_list[2])
                 triple_grad_item = backward_op_dict[phi_triple_grad_op_name]
+                if (
+                    double_grad_item["backward_composite"] is not None
+                    and phi_triple_grad_op_name != triple_grad_op_name
+                ):
+                    double_grad_item[
+                        "backward_composite"
+                    ] = triple_grad_op_name
                 double_grad_item['backward'] = triple_grad_op_name
                 triple_grad_item['op_name'] = triple_grad_op_name
-                update_grad_op_compat_name(triple_grad_item, args_map)
+                add_grad_op_compat_name(triple_grad_item, args_map)
                 update_common_params_name(
                     triple_grad_item,
                     args_map,
                     scalar_configs,
                     int_array_configs,
                 )
-                triple_grad_item["attr_dict"] = to_named_dict(
-                    triple_grad_item["attrs"]
-                )
 
 
 def process_invoke_op(forward_op_dict, backward_op_dict):
@@ -442,20 +416,28 @@ def process_invoke_op(forward_op_dict, backward_op_dict):
                 for input_item in reuse_op['inputs']:
                     bw_op['invoke']['inputs'].append(
                         {
+                            'fluid_name': input_item['fluid_name'],
                             'name': input_item['name'],
                             'value': args_list[args_index],
                         }
                     )
                     args_index = args_index + 1
+                bw_fluid_attrs_set = [
+                    item['fluid_name'] for item in bw_op['attrs']
+                ]
                 for attr in reuse_op['attrs']:
                     if args_index < len(args_list):
                         attr_value = (
                             f"this->GetAttr(\"{args_list[args_index]}\")"
-                            if args_list[args_index] in bw_op['attr_dict']
+                            if args_list[args_index] in bw_fluid_attrs_set
                             else args_list[args_index]
                         )
                         bw_op['invoke']['attrs'].append(
-                            {'name': attr['name'], 'value': attr_value}
+                            {
+                                'name': attr['name'],
+                                'fluid_name': attr['fluid_name'],
+                                'value': attr_value,
+                            }
                         )
                         args_index = args_index + 1
                     else:
@@ -464,7 +446,8 @@ def process_invoke_op(forward_op_dict, backward_op_dict):
                 bw_op['invoke']['outputs'].append(
                     {
                         'name': output_item['name'],
-                        'value': bw_op['outputs'][idx]['name'],
+                        'fluid_name': output_item['fluid_name'],
+                        'value': bw_op['outputs'][idx]['fluid_name'],
                     }
                 )
 
@@ -517,17 +500,26 @@ def main(
     for op in ops:
         op['op_name'] = op['name']
+        add_fluid_name(op['inputs'])
+        add_fluid_name(op['attrs'])
+        add_fluid_name(op['outputs'])
     for bw_op in backward_ops:
         bw_op['op_name'] = bw_op['name']
+        add_fluid_name(bw_op['inputs'])
+        add_fluid_name(bw_op['attrs'])
+        add_fluid_name(bw_op['outputs'])
+        add_fluid_name(bw_op['forward']['inputs'])
+        add_fluid_name(bw_op['forward']['attrs'])
+        add_fluid_name(bw_op['forward']['outputs'])
         for bw_output in bw_op['outputs']:
             bw_output['drop_empty_grad'] = True
 
     # deal the drop_empty_grad of bw_op by op_compat.yaml
     parse_drop_empty_grad(op_fluid_map_list, backward_op_dict)
 
-    parse_composite_info(ops, backward_ops, backward_op_dict)
+    add_composite_info(ops, backward_ops, backward_op_dict)
 
-    replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict)
+    add_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict)
 
     # prepare for invoke case
     process_invoke_op(forward_op_dict, backward_op_dict)
@@ -555,7 +547,6 @@ def main(
             ops=ops,
             backward_ops=backward_ops,
             op_dict=op_dict,
-            composite_gen_flag=True,
         )
         f.write(msg)
     ks_template = env.get_template('ks.c.j2')
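Taken together, the two new helpers replace the old composite bookkeeping: every argument dict gains a `fluid_name` (defaulting to its phi name until the op_compat aliases overwrite it), and every op gains a `backward_composite` field. A minimal sketch driving the helper logic from the diff with a toy record (real op records carry many more keys):

```python
def add_fluid_name(dict_list):
    for item in dict_list:
        item["fluid_name"] = item["name"]

def add_composite_info(ops, backward_ops, backward_op_dict):
    # ops remember their composite backward op, if one is registered
    for op in ops + backward_ops:
        if (
            op["backward"] in backward_op_dict
            and "composite" in backward_op_dict[op["backward"]]
        ):
            op["backward_composite"] = op["backward"]
        else:
            op["backward_composite"] = None

backward_op_dict = {"tanh_grad": {"composite": {"func_name": "tanh_grad"}}}
op = {"name": "tanh", "backward": "tanh_grad", "inputs": [{"name": "x"}]}
add_fluid_name(op["inputs"])
add_composite_info([op], [], backward_op_dict)
print(op["inputs"][0]["fluid_name"], op["backward_composite"])  # x tanh_grad
```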
diff --git a/paddle/fluid/operators/generator/generate_sparse_op.py b/paddle/fluid/operators/generator/generate_sparse_op.py
index 1da91e3f60005f731a78c08fcfcbde53778e2c43..3eea32091dc8067c3572a9621cccd9d714d50b9e 100644
--- a/paddle/fluid/operators/generator/generate_sparse_op.py
+++ b/paddle/fluid/operators/generator/generate_sparse_op.py
@@ -28,12 +28,14 @@ from filters import (
     to_opmaker_name_cstr,
     to_pascal_case,
     to_scalar_tensor_name,
+    to_variable_names,
 )
-from generate_op import process_invoke_op
+from generate_op import add_fluid_name, process_invoke_op
 from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from parse_utils import to_named_dict
 from tests import (
     is_base_op,
+    is_composite_op,
     is_initializer_list,
     is_scalar,
     is_vec,
@@ -60,7 +62,9 @@
 env.filters["to_input_name"] = to_input_name
 env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
 env.filters["cartesian_prod_mapping"] = cartesian_prod_mapping
 env.filters["to_composite_grad_opmaker_name"] = to_composite_grad_opmaker_name
+env.filters["to_variable_names"] = to_variable_names
 env.tests["base_op"] = is_base_op
+env.tests["composite_op"] = is_composite_op
 env.tests["vec"] = is_vec
 env.tests["scalar"] = is_scalar
 env.tests["initializer_list"] = is_initializer_list
@@ -96,9 +100,18 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path):
         op['name'] = op['op_name']
         if op["backward"] is not None:
             op["backward"] = SPARSE_OP_PREFIX + op["backward"]
+        add_fluid_name(op["inputs"])
+        add_fluid_name(op["attrs"])
+        add_fluid_name(op["outputs"])
     for bw_op in backward_ops:
         bw_op['op_name'] = SPARSE_OP_PREFIX + bw_op['name']
         bw_op['name'] = bw_op['op_name']
+        add_fluid_name(bw_op["inputs"])
+        add_fluid_name(bw_op["attrs"])
+        add_fluid_name(bw_op["outputs"])
+        add_fluid_name(bw_op["forward"]["inputs"])
+        add_fluid_name(bw_op["forward"]["attrs"])
+        add_fluid_name(bw_op["forward"]["outputs"])
         if 'invoke' in bw_op:
             bw_op['invoke']['args'] = [
                 param.strip() for param in bw_op['invoke']['args'].split(',')
@@ -139,7 +152,6 @@ def main(op_yaml_path, backward_yaml_path, output_op_path, output_arg_map_path):
             ops=ops,
             backward_ops=backward_ops,
             op_dict=op_dict,
-            composite_gen_flag=False,
        )
         f.write(msg)
diff --git a/paddle/fluid/operators/generator/generate_static_op.py b/paddle/fluid/operators/generator/generate_static_op.py
index 7701f76734a0c0073a59dd0ec5fa441ce1830385..3a825bafb127c254c50f8b49b1b063335862d0df 100644
--- a/paddle/fluid/operators/generator/generate_static_op.py
+++ b/paddle/fluid/operators/generator/generate_static_op.py
@@ -28,12 +28,14 @@ from filters import (
     to_opmaker_name_cstr,
     to_pascal_case,
     to_scalar_tensor_name,
+    to_variable_names,
 )
-from generate_op import replace_compat_name
+from generate_op import add_compat_name, add_fluid_name
 from jinja2 import Environment, FileSystemLoader, StrictUndefined
 from parse_utils import to_named_dict
 from tests import (
     is_base_op,
+    is_composite_op,
     is_initializer_list,
     is_scalar,
     is_vec,
@@ -60,7 +62,9 @@
 env.filters["to_input_name"] = to_input_name
 env.filters["to_opmaker_name_cstr"] = to_opmaker_name_cstr
 env.filters["cartesian_prod_mapping"] = cartesian_prod_mapping
 env.filters["to_composite_grad_opmaker_name"] = to_composite_grad_opmaker_name
+env.filters["to_variable_names"] = to_variable_names
 env.tests["base_op"] = is_base_op
+env.tests["composite_op"] = is_composite_op
 env.tests["vec"] = is_vec
 env.tests["scalar"] = is_scalar
 env.tests["initializer_list"] = is_initializer_list
@@ -100,8 +104,11 @@ def main(
     for op in ops:
         op['op_name'] = op['name']
+        add_fluid_name(op["inputs"])
add_fluid_name(op["attrs"]) + add_fluid_name(op["outputs"]) - replace_compat_name(op_op_map, forward_op_dict, {}) + add_compat_name(op_op_map, forward_op_dict, {}) if len(ops) == 0: if os.path.isfile(output_op_path): @@ -116,7 +123,6 @@ def main( ops=ops, backward_ops=[], op_dict=forward_op_dict, - composite_gen_flag=False, ) f.write(msg) diff --git a/paddle/fluid/operators/generator/parse_utils.py b/paddle/fluid/operators/generator/parse_utils.py index d5a58a2a94a0ef67ca643c846c577b971ede4a49..31441aadbf8e38431317a439fb50c05490f2d4d6 100644 --- a/paddle/fluid/operators/generator/parse_utils.py +++ b/paddle/fluid/operators/generator/parse_utils.py @@ -294,14 +294,13 @@ def parse_composite( composite_config: str, ) -> Dict[str, Any]: # composite_config: func(args1, args2,.....) - fname = r'(.*?)' - wspace = r'\s*' - fargs = r'(.*?)' - pattern = fr'{fname}{wspace}\({wspace}{fargs}{wspace}\)' - - m = re.search(pattern, composite_config) - func_name = m.group(1) - func_args = m.group(2) + result = re.search( + r"(?P[a-z][a-z0-9_]+)\s*\((?P[^\)]+)\)", + composite_config, + ) + + func_name = result.group("func_name") + func_args = result.group("func_args") composite_dict = {} composite_dict["func_name"] = func_name diff --git a/paddle/fluid/operators/generator/templates/op.c.j2 b/paddle/fluid/operators/generator/templates/op.c.j2 index 23641dad90f1bebc38e7cb76a18248c0f4a9d142..2339822af280fb2050d3e84dea3daa22395913e3 100644 --- a/paddle/fluid/operators/generator/templates/op.c.j2 +++ b/paddle/fluid/operators/generator/templates/op.c.j2 @@ -39,11 +39,9 @@ using paddle::framework::GradVarName; {% else %} {{backward_op_reused_maker(op, op_dict[op["forward"]["name"]], op["invoke"])}} {% endif %} - {% if composite_gen_flag == True %} - {% if op is composite_op %} + {% if op is composite_op %} {{composite_grad_op_maker(op_dict[op["name"]])}} - {% endif %} - {% endif %} + {% endif %} {% endfor %} } // namespace operators } // namespace paddle @@ -51,7 +49,7 @@ using paddle::framework::GradVarName; namespace ops = paddle::operators; {% for op in ops + backward_ops %} {% if op is base_op %} -{{register_op_with_components(op, op_dict)}} +{{register_op_with_components(op)}} {{register_op_version(op)}} {% endif %} {% endfor %} diff --git a/paddle/fluid/operators/generator/templates/operator_utils.c.j2 b/paddle/fluid/operators/generator/templates/operator_utils.c.j2 index 000e56453d934f248ab6e427c722b997bb1d0032..a471efaa562b4cb579ae9ddd1f38193567f7d392 100644 --- a/paddle/fluid/operators/generator/templates/operator_utils.c.j2 +++ b/paddle/fluid/operators/generator/templates/operator_utils.c.j2 @@ -12,7 +12,7 @@ class {{op_name | to_pascal_case}}OpMaker : public framework::OpProtoAndCheckerM {{add_output(loop.index0, output, op_name)}}; {% endfor %} {% for attr in op["attrs"] %} - {% if attr["name"] in op["kernel"]["param"] %} + {% if attr["fluid_name"] in op["kernel"]["param"] %} {{add_attr(loop.index0, attr, op_name)}}; {% endif %} {% endfor %} @@ -27,7 +27,7 @@ TODO: Documentation of {{op_name}} op. 
diff --git a/paddle/fluid/operators/generator/templates/op.c.j2 b/paddle/fluid/operators/generator/templates/op.c.j2
index 23641dad90f1bebc38e7cb76a18248c0f4a9d142..2339822af280fb2050d3e84dea3daa22395913e3 100644
--- a/paddle/fluid/operators/generator/templates/op.c.j2
+++ b/paddle/fluid/operators/generator/templates/op.c.j2
@@ -39,11 +39,9 @@ using paddle::framework::GradVarName;
 {% else %}
 {{backward_op_reused_maker(op, op_dict[op["forward"]["name"]], op["invoke"])}}
 {% endif %}
-    {% if composite_gen_flag == True %}
-    {% if op is composite_op %}
+    {% if op is composite_op %}
 {{composite_grad_op_maker(op_dict[op["name"]])}}
-    {% endif %}
-    {% endif %}
+    {% endif %}
 {% endfor %}
 }  // namespace operators
 }  // namespace paddle
@@ -51,7 +49,7 @@ using paddle::framework::GradVarName;
 namespace ops = paddle::operators;
 {% for op in ops + backward_ops %}
 {% if op is base_op %}
-{{register_op_with_components(op, op_dict)}}
+{{register_op_with_components(op)}}
 {{register_op_version(op)}}
 {% endif %}
 {% endfor %}
diff --git a/paddle/fluid/operators/generator/templates/operator_utils.c.j2 b/paddle/fluid/operators/generator/templates/operator_utils.c.j2
index 000e56453d934f248ab6e427c722b997bb1d0032..a471efaa562b4cb579ae9ddd1f38193567f7d392 100644
--- a/paddle/fluid/operators/generator/templates/operator_utils.c.j2
+++ b/paddle/fluid/operators/generator/templates/operator_utils.c.j2
@@ -12,7 +12,7 @@ class {{op_name | to_pascal_case}}OpMaker : public framework::OpProtoAndCheckerMaker {
 {{add_output(loop.index0, output, op_name)}};
   {% endfor %}
   {% for attr in op["attrs"] %}
-  {% if attr["name"] in op["kernel"]["param"] %}
+  {% if attr["fluid_name"] in op["kernel"]["param"] %}
 {{add_attr(loop.index0, attr, op_name)}};
   {% endif %}
   {% endfor %}
@@ -27,7 +27,7 @@ TODO: Documentation of {{op_name}} op.
 
 {# add input, it could be duplicable or dispensable #}
 {% macro add_input(i, input, op_name) %}{# inline #}
-  {% set name = input["name"] %}
+  {% set name = input["fluid_name"] %}
   {% set typename = input["typename"] %}
 AddInput({{name | to_opmaker_name}}, "({{typename}}), input {{i}} of {{op_name}} op.")
   {%- if typename is vec %}
@@ -42,7 +42,7 @@ AddInput({{name | to_opmaker_name}}, "({{typename}}), input {{i}} of {{op_name}} op.")
 
 {# add output, it could be duplicable or intermediate, however, optional output is not supported #}
 {% macro add_output(i, output, op_name) %}{# inline #}
-  {% set name = output["name"] %}
+  {% set name = output["fluid_name"] %}
   {% set typename = output["typename"] %}
   {% set is_intermediate = output["intermediate"] %}
 AddOutput({{name | to_opmaker_name}}, "({{typename}}), output {{i}} of {{op_name}} op.")
@@ -66,7 +66,7 @@ AddOutput({{name | to_opmaker_name}}, "({{typename}}), output {{i}} of {{op_name}} op.")
 
 {# add attribute, and process default value if needed #}
 {% macro add_attr(i, attr, op_name) %}{# inline #}
-  {% set name = attr["name"] %}
+  {% set name = attr["fluid_name"] %}
   {% set typename = attr["typename"] %}
   {% if typename is scalar %}
 AddInput("{{attr | to_scalar_tensor_name}}", "attribute {{i}} for {{op_name}} op from 0D Tensor.")
@@ -153,15 +153,15 @@ KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const Argum
   {% set kernel_in_type_list = kernel_config["dispatch"][kernel_func][0] %}
 
   if (
 {%- for input in inputs %}
-{%- if input["name"] in kernel_config["param"] %}
+{%- if input["fluid_name"] in kernel_config["param"] %}
 {%- if kernel_in_type_list[input_idx.idx] == "dense" %}
-ctx.IsDenseTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
+ctx.IsDenseTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
 {%- elif kernel_in_type_list[input_idx.idx] == "selected_rows" %}
-ctx.IsSelectedRowsInput("{{input["name"]}}"){{" && " if not loop.last}}
+ctx.IsSelectedRowsInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
 {%- elif kernel_in_type_list[input_idx.idx] == "sparse_coo" %}
-ctx.IsSparseCooTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
+ctx.IsSparseCooTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
 {%- elif kernel_in_type_list[input_idx.idx] == "sparse_csr" %}
-ctx.IsSparseCsrTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
+ctx.IsSparseCsrTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}}
 {%- endif %}
 {% set input_idx.idx = input_idx.idx + 1 %}
 {%- endif %}
@@ -210,8 +210,8 @@ PD_REGISTER_ARG_MAPPING_FN({{op["op_name"]}}, phi::{{op["op_name"] | to_pascal_c
 {% macro get_input_list(inputs, kernel_args) %}{# inline #}
 paddle::small_vector<const char*> inputs {
 {%- for input in inputs %}
-{%- if input["name"] in kernel_args %}
-{{input["name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
+{%- if input["fluid_name"] in kernel_args %}
+{{input["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
 {%- endif %}
 {%- endfor %}
 }
@@ -219,8 +219,8 @@ paddle::small_vector<const char*> inputs {
 {% macro get_an_attr(attr, kernel_args) %}{# inline #}
 {% set typename = attr["typename"] %}
-{%- if attr["name"] in kernel_args %}
-{% set name = attr["name"] %}
+{%- if attr["fluid_name"] in kernel_args %}
+{% set name = attr["fluid_name"] %}
 {% if typename is scalar %}{# scalar correspond to a dispensable input and an attr in opmaker #}
"{{attr | to_scalar_tensor_name}}" : "{{name}}"); {%- elif typename == "IntArray" %} @@ -251,7 +251,7 @@ attrs.emplace_back("{{name}}"); {% macro get_output_list(outputs, kernel_args) %}{# inline #} paddle::small_vector outputs { {%- for output in outputs %} -{{output["name"] | to_opmaker_name_cstr}}{{", " if not loop.last}} +{{output["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}} {%- endfor %} } {%- endmacro %} @@ -263,7 +263,7 @@ phi::KernelKey GetExpectedKernelType( {%if kernel["data_type"] is not none %}{# data type ---------------------------------#} {% if kernel["data_type"]["candidates"] | length == 1 %} {% set data_type_arg = kernel["data_type"]["candidates"][0] %} - {% set inputs = op["inputs"] | map(attribute="name") | list %} + {% set inputs = op["inputs"] | map(attribute="fluid_name") | list %} {% if data_type_arg in inputs %} auto data_type = framework::OperatorWithKernel::IndicateVarDataType(ctx, {{data_type_arg | to_opmaker_name}}); {% if kernel["data_type"]["to_complex_flag"][0] %} @@ -353,9 +353,8 @@ DECLARE_NO_NEED_BUFFER_VARS_INFERER({{op["op_name"] | to_pascal_case}}NoNeedBuff {% endif %} {% endmacro%} -{% macro register_op_with_components(op, op_dict) %} +{% macro register_op_with_components(op) %} {% set name = op["op_name"] %} -{% set phi_name = op["name"] %} REGISTER_OPERATOR({{name}}, ops::{{name | to_pascal_case}}Op, {% if not "forward" in op %}{# it is a forward op #} ops::{{name | to_pascal_case}}OpMaker, @@ -371,8 +370,8 @@ REGISTER_OPERATOR({{name}}, ops::{{name | to_pascal_case}}Op, {% if op is supports_inplace %}{# inplace#} ops::{{name | to_pascal_case}}InplaceInferer, {% endif %} -{% if "phi_backward" in op and op["phi_backward"] is not none and "composite" in op_dict[op["phi_backward"]] %} - ops::{{op["phi_backward"] | to_composite_grad_opmaker_name}}, +{% if "backward_composite" in op and op["backward_composite"] is not none %} + ops::{{op["backward_composite"] | to_composite_grad_opmaker_name}}, {% endif %} {% if op is supports_no_need_buffer %}{# no_need_buffer #} ops::{{name | to_pascal_case}}NoNeedBufferVarInferer, @@ -425,12 +424,12 @@ REGISTER_OP_VERSION({{name}}) {# --------------------------------------- backward op maker ---------------------------------------------- #} {% macro backward_op_maker(op, forward_op ) %} {% set name = op["op_name"] %} - {% set forward_input_names = op["forward"]["inputs"] | map(attribute="name") | list %} - {% set forward_output_names = op["forward"]["outputs"] | map(attribute="name") | list %} - {% set forward_attr_names = op["forward"]["attrs"] | map(attribute="name") | list %} - {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="name") | list %} - {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="name") | list %} - {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="name") | list %} + {% set forward_input_names = op["forward"]["inputs"] | map(attribute="fluid_name") | list %} + {% set forward_output_names = op["forward"]["outputs"] | map(attribute="fluid_name") | list %} + {% set forward_attr_names = op["forward"]["attrs"] | map(attribute="fluid_name") | list %} + {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %} + {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %} + {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %} template class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker { 
  public:
@@ -441,8 +440,8 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
     grad_op->SetType("{{name}}");
   {% for input in op["inputs"] %}
-    grad_op->SetInput({{input["name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
-      input["name"],
+    grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
+      input["fluid_name"],
       forward_input_names,
       forward_output_names,
       forward_input_orig_names,
@@ -450,8 +449,8 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
   {% endfor %}
 
   {% for output in op["outputs"] %}
-    grad_op->SetOutput({{output["name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
-      output["name"],
+    grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
+      output["fluid_name"],
       forward_input_names,
       forward_output_names,
       forward_input_orig_names,
@@ -461,7 +460,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
 
     grad_op->SetAttrMap(this->Attrs());
   {% for attr in op["attrs"] %}
-    {% set attr_name = attr["name"] %}
+    {% set attr_name = attr["fluid_name"] %}
     {% if attr_name in forward_attr_names %}
       {% if attr["typename"] == "IntArray" %}
         {% if 'tensor_name' in attr or 'manual_flag' not in attr %}
@@ -489,12 +488,12 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
 
 {% macro backward_op_reused_maker(bw_op, forward_op, invoke_op) %}
   {% set name = bw_op["op_name"] %}
-  {% set forward_input_names = bw_op["forward"]["inputs"] | map(attribute="name") | list %}
-  {% set forward_output_names = bw_op["forward"]["outputs"] | map(attribute="name") | list %}
-  {% set forward_attr_names = bw_op["forward"]["attrs"] | map(attribute="name") | list %}
-  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="name") | list %}
-  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="name") | list %}
-  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="name") | list %}
+  {% set forward_input_names = bw_op["forward"]["inputs"] | map(attribute="fluid_name") | list %}
+  {% set forward_output_names = bw_op["forward"]["outputs"] | map(attribute="fluid_name") | list %}
+  {% set forward_attr_names = bw_op["forward"]["attrs"] | map(attribute="fluid_name") | list %}
+  {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %}
+  {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %}
+  {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %}
 template <typename T>
 class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
  public:
@@ -505,7 +504,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
     grad_op->SetType("{{invoke_op["func"]}}");
   {% for input in invoke_op["inputs"] %}
-    grad_op->SetInput({{input["name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
+    grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward(
       input["value"],
       forward_input_names,
       forward_output_names,
       forward_input_orig_names,
@@ -514,7 +513,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
 
   {% for output in invoke_op["outputs"] %}
-    grad_op->SetOutput({{output["name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
+    grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward(
       output["value"],
       forward_input_names,
       forward_output_names,
       forward_input_orig_names,
@@ -524,42 +523,49 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T> {
   {% endfor %}
 
   {% for attr in invoke_op["attrs"] %}
-    grad_op->SetAttr("{{attr["name"]}}", {{attr["value"]}});
+    grad_op->SetAttr("{{attr["fluid_name"]}}", {{attr["value"]}});
   {% endfor %}
   }
 };
 {% endmacro %}
 
-{% macro composite_grad_op_maker(composite_op_dict) %}
-  {% set op_name = composite_op_dict["name"] %}
-class {{op_name | to_composite_grad_opmaker_name}} : public prim::GradCompositeOpMakerBase {
+{% macro composite_grad_op_maker(backward_op) %}
+  {% set op_name = backward_op["op_name"] %}
+  {% set inputs = backward_op["inputs"] | to_variable_names("name")%}
+  {% set input_dict = backward_op["input_dict"] %}
+  {% set fluid_inputs = backward_op["inputs"] | to_variable_names("fluid_name")%}
+  {% set forward_fluid_inputs = backward_op["forward"]["inputs"] | to_variable_names("fluid_name")%}
+  {% set forward_fluid_outputs = backward_op["forward"]["outputs"] | to_variable_names("fluid_name")%}
+  {% set attrs = backward_op["attrs"] | to_variable_names("name") %}
+  {% set fluid_attrs = backward_op["attrs"] | to_variable_names("fluid_name") %}
+  {% set attr_dict = backward_op["attr_dict"] %}
+  {% set outputs = backward_op["outputs"] | to_variable_names("name")%}
+  {% set output_dict = backward_op["output_dict"] %}
+  {% set fluid_outputs = backward_op["outputs"] | to_variable_names("fluid_name")%}
+  {% set composite_func_info = backward_op["composite"] %}
+class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradOpMakerBase {
 public:
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
   void Apply() override {
   //get inputs
-{{construct_composite_input(composite_op_dict)}}
+{{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict)}}
   //get attr
-{{construct_composite_attr(composite_op_dict)}}
+{{construct_composite_attr(attrs, fluid_attrs, attr_dict)}}
   //get output
-{{construct_composite_output(composite_op_dict)}}
+{{construct_composite_output(outputs, fluid_outputs, output_dict)}}
   //get output ptr
-{{construct_composite_output_ptr(composite_op_dict)}}
+{{construct_composite_output_ptr(outputs, output_dict)}}
   //get output orginal name
-{{get_composite_output_orginal_name(composite_op_dict)}}
+{{get_composite_output_orginal_name(outputs, output_dict)}}
   //call composite backward func
-{{call_composite_backward_api(composite_op_dict)}}
+{{call_composite_backward_api(composite_func_info)}}
   //recover output name
-{{recover_composite_output_name(composite_op_dict)}}
+{{recover_composite_output_name(outputs)}}
   }
 };
 {%- endmacro %}
 
-{% macro construct_composite_input(composite_op_dict) %}
-  {% set inputs = composite_op_dict["composite"]["phi_inputs"] %}
-  {% set input_dict = composite_op_dict["input_dict"] %}
-  {% set fluid_inputs = composite_op_dict["composite"]["fluid_inputs"] %}
-  {% set forward_fluid_inputs = composite_op_dict["forward"]["inputs"] | map(attribute="name") | list %}
-  {% set forward_fluid_outputs = composite_op_dict["forward"]["outputs"] | map(attribute="name") | list %}
+{% macro construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict) %}
   {% set inputs_length = inputs | length %}
   {% for i in range(inputs_length) %}
   {% set input_typename = input_dict[inputs[i]]["typename"] %}
@@ -567,83 +573,75 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::GradCompositeO
   {% if fluid_inputs[i] in forward_fluid_inputs %}
   {% if input_typename == "Tensor" %}
   {% if input_optional_flag == True %}
-    paddle::optional<paddle::experimental::Tensor> {{inputs[i]}} = this->GetOptionalSingleForwardInput("{{fluid_inputs[i]}}");
-  {% elif input_optional_flag == False %}
-    paddle::experimental::Tensor {{inputs[i]}} = this->GetSingleForwardInput("{{fluid_inputs[i]}}");
+    auto {{inputs[i]}} = this->GetOptionalSingleForwardInput("{{fluid_inputs[i]}}");
+  {% else %}
+    auto {{inputs[i]}} = this->GetSingleForwardInput("{{fluid_inputs[i]}}");
   {% endif %}
   {% elif input_typename == "Tensor[]" %}
   {% if input_optional_flag == True %}
-    std::vector<paddle::optional<paddle::experimental::Tensor>> {{inputs[i]}} = this->GetOptionalMultiForwardInput("{{fluid_inputs[i]}}");
-  {% elif input_optional_flag == False %}
-    std::vector<paddle::experimental::Tensor> {{inputs[i]}} = this->GetMultiForwardInput("{{fluid_inputs[i]}}");
+    auto {{inputs[i]}} = this->GetOptionalMultiForwardInput("{{fluid_inputs[i]}}");
+  {% else %}
+    auto {{inputs[i]}} = this->GetMultiForwardInput("{{fluid_inputs[i]}}");
   {% endif %}
   {% endif %}
   {% elif fluid_inputs[i] in forward_fluid_outputs %}
   {% if input_typename == "Tensor" %}
   {% if input_optional_flag == True %}
-    paddle::optional<paddle::experimental::Tensor> {{inputs[i]}} = this->GetOptionalSingleForwardOutput("{{fluid_inputs[i]}}");
-  {% elif input_optional_flag == False %}
-    paddle::experimental::Tensor {{inputs[i]}} = this->GetSingleForwardOutput("{{fluid_inputs[i]}}");
+    auto {{inputs[i]}} = this->GetOptionalSingleForwardOutput("{{fluid_inputs[i]}}");
+  {% else %}
+    auto {{inputs[i]}} = this->GetSingleForwardOutput("{{fluid_inputs[i]}}");
   {% endif %}
   {% elif input_typename == "Tensor[]" %}
   {% if input_optional_flag == True %}
-    std::vector<paddle::optional<paddle::experimental::Tensor>> {{inputs[i]}} = this->GetOptionalMultiForwardOutput("{{fluid_inputs[i]}}");
-  {% elif input_optional_flag == False %}
-    std::vector<paddle::experimental::Tensor> {{inputs[i]}} = this->GetMultiForwardOutput("{{fluid_inputs[i]}}");
+    auto {{inputs[i]}} = this->GetOptionalMultiForwardOutput("{{fluid_inputs[i]}}");
+  {% else %}
+    auto {{inputs[i]}} = this->GetMultiForwardOutput("{{fluid_inputs[i]}}");
   {% endif %}
   {% endif %}
   {% elif fluid_inputs[i][:-5] in forward_fluid_outputs %}
   {% if input_typename == "Tensor" %}
   {% if input_optional_flag == True %}
-    paddle::optional<paddle::experimental::Tensor> {{inputs[i]}} = this->GetOptionalSingleOutputGrad("{{fluid_inputs[i][:-5]}}");
-  {% elif input_optional_flag == False %}
-    paddle::experimental::Tensor {{inputs[i]}} = this->GetSingleOutputGrad("{{fluid_inputs[i][:-5]}}");
+    auto {{inputs[i]}} = this->GetOptionalSingleOutputGrad("{{fluid_inputs[i][:-5]}}");
+  {% else %}
+    auto {{inputs[i]}} = this->GetSingleOutputGrad("{{fluid_inputs[i][:-5]}}");
   {% endif %}
   {% elif input_typename == "Tensor[]" %}
   {% if input_optional_flag == True %}
-    std::vector<paddle::optional<paddle::experimental::Tensor>> {{inputs[i]}} = this->GetOptionalMultiOutputGrad("{{fluid_inputs[i][:-5]}}");
-  {% elif input_optional_flag == False %}
-    std::vector<paddle::experimental::Tensor> {{inputs[i]}} = this->GetMultiOutputGrad("{{fluid_inputs[i][:-5]}}");
+    auto {{inputs[i]}} = this->GetOptionalMultiOutputGrad("{{fluid_inputs[i][:-5]}}");
+  {% else %}
+    auto {{inputs[i]}} = this->GetMultiOutputGrad("{{fluid_inputs[i][:-5]}}");
   {%- endif %}
   {%- endif %}
   {%- endif %}
   {%- endfor %}
 {%- endmacro %}
 
-{% macro construct_composite_attr(composite_op_dict) %}
-  {% set attrs = composite_op_dict["composite"]["phi_attrs"] %}
-  {% set fluid_attrs = composite_op_dict["composite"]["fluid_attrs"] %}
-  {% set fluid_attrs_dict = composite_op_dict["attr_dict"] %}
+{% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %}
   {% set attrs_length = attrs | length %}
   {% for i in range(attrs_length) %}
-  {% set attrs_data_type = fluid_attrs_dict[fluid_attrs[i]]["typename"] | to_op_attr_type %}
-  {{attrs_data_type}} {{attrs[i]}} = this->Attr<{{attrs_data_type}}>("{{fluid_attrs[i]}}");
+  {% set attrs_data_type = attr_dict[attrs[i]]["typename"] | to_op_attr_type %}
+  const {{attrs_data_type}} {{attrs[i]}} = this->Attr<{{attrs_data_type}}>("{{fluid_attrs[i]}}");
   {% endfor %}
 {%- endmacro %}
 
-{% macro construct_composite_output(composite_op_dict) %}
-  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
-  {% set fluid_outputs = composite_op_dict["composite"]["fluid_outputs"] %}
-  {% set outputs_dict = composite_op_dict["output_dict"] %}
+{% macro construct_composite_output(outputs, fluid_outputs, output_dict) %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
-  {% set output_typename = outputs_dict[outputs[i]]["typename"] %}
+  {% set output_typename = output_dict[outputs[i]]["typename"] %}
   {% if output_typename == "Tensor" %}
-  paddle::experimental::Tensor {{outputs[i] + "_t"}} = this->GetSingleInputGrad("{{fluid_outputs[i][:-5]}}");
+  auto {{outputs[i] + "_t"}} = this->GetSingleInputGrad("{{fluid_outputs[i][:-5]}}");
   {% elif output_typename == "Tensor[]" %}
-  std::vector<paddle::experimental::Tensor> {{outputs[i] + "_t"}} = this->GetMultiInputGrad("{{fluid_outputs[i][:-5]}}");
+  auto {{outputs[i] + "_t"}} = this->GetMultiInputGrad("{{fluid_outputs[i][:-5]}}");
   {%- endif %}
   {%- endfor %}
 {%- endmacro %}
 
-{% macro construct_composite_output_ptr(composite_op_dict) %}
-  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
-  {% set outputs_dict = composite_op_dict["output_dict"] %}
+{% macro construct_composite_output_ptr(outputs, output_dict) %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
-  {% set output_typename = outputs_dict[outputs[i]]["typename"] %}
+  {% set output_typename = output_dict[outputs[i]]["typename"] %}
   {% if output_typename == "Tensor" %}
-  paddle::experimental::Tensor* {{outputs[i]}} = this->GetOutputPtr(&{{outputs[i]+ "_t"}});
+  auto {{outputs[i]}} = this->GetOutputPtr(&{{outputs[i]+ "_t"}});
   {% elif output_typename == "Tensor[]" %}
   std::vector<paddle::experimental::Tensor*> {{outputs[i]}}({{outputs[i] + "_t"}}.size());
   for(size_t i = 0; i < {{outputs[i]}}.size(); ++i){
@@ -654,27 +652,24 @@
   {%- endfor %}
 {%- endmacro %}
 
-{% macro get_composite_output_orginal_name(composite_op_dict) %}
-  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
-  {% set outputs_dict = composite_op_dict["output_dict"] %}
+{% macro get_composite_output_orginal_name(outputs, output_dict) %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
-  {% set output_typename = outputs_dict[outputs[i]]["typename"] %}
+  {% set output_typename = output_dict[outputs[i]]["typename"] %}
   {% if output_typename == "Tensor" %}
-  std::string {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
+  auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
   {% elif output_typename == "Tensor[]" %}
-  std::vector<std::string> {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
+  auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
   {%- endif %}
   {%- endfor %}
 {%- endmacro %}
 
-{% macro call_composite_backward_api(composite_op_dict) %}
-  VLOG(3) << "Runing {{composite_op_dict["composite"]["func_name"]}} composite func";
-  prim::{{composite_op_dict["composite"]["func_name"]}}({{composite_op_dict["composite"]["func_args"]}});
+{% macro call_composite_backward_api(composite_func_info) %}
+  VLOG(3) << "Runing {{composite_func_info["func_name"]}} composite func";
+  prim::{{composite_func_info["func_name"]}}({{composite_func_info["func_args"]}});
 {%- endmacro %}
 
-{% macro recover_composite_output_name(composite_op_dict) %}
-  {% set outputs = composite_op_dict["composite"]["phi_outputs"] %}
+{% macro recover_composite_output_name(outputs) %}
   {% set outputs_length = outputs | length %}
   {% for i in range(outputs_length) %}
   this->RecoverOutputName({{outputs[i] + "_t"}}, {{outputs[i] + "_name"}});
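The template rewrite above replaces the monolithic `composite_op_dict` parameter with flat name lists built by the new `to_variable_names` filter. A self-contained mini-render showing that plumbing (the template string and the op record are illustrative, not the real `operator_utils.c.j2` or parsed YAML):

```python
from jinja2 import Environment, StrictUndefined

def to_variable_names(dict_list, key):
    return [var[key] for var in dict_list]

env = Environment(undefined=StrictUndefined)
env.filters["to_variable_names"] = to_variable_names

tpl = env.from_string(
    "{% set inputs = op['inputs'] | to_variable_names('name') %}"
    "{% set fluid_inputs = op['inputs'] | to_variable_names('fluid_name') %}"
    "phi: {{ inputs | join(', ') }} | fluid: {{ fluid_inputs | join(', ') }}"
)
op = {"inputs": [{"name": "x", "fluid_name": "X"},
                 {"name": "out_grad", "fluid_name": "Out_grad"}]}
print(tpl.render(op=op))  # phi: x, out_grad | fluid: X, Out_grad
```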
diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
index 2b337887faa3f8fe2dde678fa6caec12256adeb3..25e6ad9b65cc0662fd3ee5f1811cc1d20f2473f3 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
+++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
@@ -64,9 +64,9 @@ class ReduceSumOpGradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
-class ReduceSumCompositeGradOpMaker : public prim::GradCompositeOpMakerBase {
+class ReduceSumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
  public:
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
   void Apply() override {
     // get inputs
     paddle::experimental::Tensor x = this->GetSingleForwardInput("X");
diff --git a/paddle/fluid/prim/tests/test_static_prim.cc b/paddle/fluid/prim/tests/test_static_prim.cc
index fe7a6ca4040448306f79978e527dafa10c9a9a27..313a3ccc99b74de65305d8d8d1b07f06760e4593 100644
--- a/paddle/fluid/prim/tests/test_static_prim.cc
+++ b/paddle/fluid/prim/tests/test_static_prim.cc
@@ -135,9 +135,9 @@ struct TestBaseProgram {
   int idx_{0};
 };
 
-class TestGradCompositeGradMaker : public GradCompositeOpMakerBase {
+class TestCompositeGradMaker : public CompositeGradOpMakerBase {
  public:
-  using prim::GradCompositeOpMakerBase::GradCompositeOpMakerBase;
+  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
   void Apply() override {}
 };
 
@@ -177,7 +177,7 @@ TEST(StaticPrim, TanhBackwardComposite) {
   std::vector<std::unique_ptr<framework::OpDesc>> grad_ops =
       std::move(framework::OpInfoMap::Instance()
                     .Get(forward_opdesc->Type())
-                    .GradCompOpMaker()(*forward_opdesc,
+                    .CompGradOpMaker()(*forward_opdesc,
                                        std::unordered_set<std::string>(),
                                        &grad_to_var,
                                        target_block,
@@ -250,11 +250,11 @@ TEST(StaticCompositeGradMaker, TestMutiInputMethod) {
   auto* forward_opdesc = target_block->AllOps()[0];
   std::unordered_map<std::string, std::string> grad_to_var;
   std::vector<framework::BlockDesc*> grad_sub_block;
-  auto test = TestGradCompositeGradMaker(*forward_opdesc,
-                                         std::unordered_set<std::string>(),
-                                         &grad_to_var,
-                                         target_block,
-                                         grad_sub_block);
+  auto test = TestCompositeGradMaker(*forward_opdesc,
+                                     std::unordered_set<std::string>(),
+                                     &grad_to_var,
+                                     target_block,
+                                     grad_sub_block);
   test();
   std::vector<paddle::experimental::Tensor> muti_fw_input =
       test.GetMultiForwardInput("X");
@@ -312,11 +312,11 @@ TEST(StaticCompositeGradMaker, TestMutiOutputMethod) {
   auto* forward_opdesc = target_block->AllOps()[0];
   std::unordered_map<std::string, std::string> grad_to_var;
   std::vector<framework::BlockDesc*> grad_sub_block;
-  auto test = TestGradCompositeGradMaker(*forward_opdesc,
-                                         std::unordered_set<std::string>(),
-                                         &grad_to_var,
-                                         target_block,
-                                         grad_sub_block);
+  auto test = TestCompositeGradMaker(*forward_opdesc,
+                                     std::unordered_set<std::string>(),
+                                     &grad_to_var,
+                                     target_block,
+                                     grad_sub_block);
   test();
   paddle::experimental::Tensor fw_input = test.GetSingleForwardInput("X");
   paddle::optional<paddle::experimental::Tensor> opt_fw_input =
diff --git a/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h b/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
index c2e7ca4ec57e2b11f6ce76a549408bcabdfbd1be..e391d8ac5300b184c8d46e9cdc26983bec037fb8 100644
--- a/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
+++ b/paddle/fluid/prim/utils/static/composite_grad_desc_maker.h
@@ -41,9 +41,9 @@ namespace prim {
   argument DropEmptyIG in the derived classes.
  */
-class GradCompositeOpMakerBase {
+class CompositeGradOpMakerBase {
  public:
-  explicit GradCompositeOpMakerBase(
+  explicit CompositeGradOpMakerBase(
       const framework::OpDesc& fwd_op,
       const std::unordered_set<std::string>& no_grad_set,
       std::unordered_map<std::string, std::string>* grad_to_var,
@@ -61,7 +61,7 @@ class CompositeGradOpMakerBase {
         acting_program_.MutableBlock(0));
   }
 
-  virtual ~GradCompositeOpMakerBase() = default;
+  virtual ~CompositeGradOpMakerBase() = default;
 
   virtual std::vector<std::unique_ptr<framework::OpDesc>> operator()() {
     this->Apply();
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 36e24364068124151ace53061fd6b37de420f8aa..556965770738913780806f134a2b7a9e4289939d 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1251,7 +1251,7 @@ All parameter, weight, gradient are variables in Paddle.
           auto op_info = framework::OpInfoMap::Instance().Get(op_desc.Type());
           auto grad_op_maker = op_info.GradOpMaker();
-          auto grad_comp_op_maker = op_info.GradCompOpMaker();
+          auto grad_comp_op_maker = op_info.CompGradOpMaker();
 
           if ((grad_op_maker == nullptr) && (grad_comp_op_maker == nullptr)) {
             // Normally, proto_ should not be null, except some special
@@ -1259,7 +1259,7 @@ All parameter, weight, gradient are variables in Paddle.
             std::string type =
                 op_info.proto_ ? op_info.proto_->type() : "unknown";
             PADDLE_THROW(platform::errors::NotFound(
-                "Neither operator %s's GradOpMaker nor GradCompOpMaker has "
+                "Neither operator %s's GradOpMaker nor CompGradOpMaker has "
                 "been registered.\nPlease check whether (%s) operator has "
                 "gradient operator.\nIf not, please set stop_gradient to be "
                 "True for its input and output variables using "
@@ -1268,10 +1268,10 @@ All parameter, weight, gradient are variables in Paddle.
                 type.c_str()));
           }
 
-          // In PrimEnabled mode, the priority of GradCompOpMaker is greater
+          // In PrimEnabled mode, the priority of CompGradOpMaker is greater
           // than GradCompMaker as we need split first-order grad operator into
           // primitive operators for compiler. In PrimDisabled mode, the
-          // priority of GradCompOpMaker is less than GradCompMaker for better
+          // priority of CompGradOpMaker is less than GradCompMaker for better
           // performance.
           std::vector<std::unique_ptr<framework::OpDesc>> grad_op_descs;
           if (paddle::prim::PrimCommonUtils::IsBwdPrimEnabled()) {