{# ----------------------------- op maker ----------------------------------- #} {% macro op_maker(op) %} {% set op_name = op["op_name"] %} class {{op_name | to_pascal_case}}OpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { {% filter indent(4, True) %} {% for input in op["inputs"] %} {{add_input(loop.index0, input, op_name)}}; {% endfor %} {% for output in op["outputs"] %} {{add_output(loop.index0, output, op_name)}}; {% endfor %} {% for attr in op["attrs"] %} {% if attr["fluid_name"] in op["kernel"]["param"] %} {{add_attr(loop.index0, attr, op_name)}}; {% endif %} {% endfor %} {% endfilter %} AddComment(R"DOC( TODO: Documentation of {{op_name}} op. )DOC"); } }; {% endmacro %} {# add input, it could be duplicable or dispensable #} {% macro add_input(i, input, op_name) %}{# inline #} {% set name = input["fluid_name"] %} {% set typename = input["typename"] %} AddInput({{name| to_opmaker_name}}, "({{typename}}), input {{i}} of {{op_name}} op.") {%- if typename is vec %} .AsDuplicable() {%- endif %} {%- if input["optional"] %} .AsDispensable() {%- endif %} {%- endmacro %} {# add output, it could be duplicable or intermediate, however, optional output is not supported #} {% macro add_output(i, output, op_name) %}{# inline #} {% set name = output["fluid_name"] %} {% set typename = output["typename"] %} {% set is_intermediate = output["intermediate"] %} AddOutput({{name | to_opmaker_name}}, "({{typename}}), output {{i}} of {{op_name}} op.") {%- if typename is vec %} .AsDuplicable() {%- endif %} {%- if is_intermediate %} .AsIntermediate() {%- endif %} {%- if output["optional"] %} .AsDispensable() {%- endif %} {%- if "is_extra" in output and output["is_extra"] %} .AsExtra() {%- endif %} {%- endmacro %} {# add attribute, and process default value if needed #} {% macro add_attr(i, attr, op_name) %}{# inline #} {% set name = attr["fluid_name"] %} {% set typename = attr["typename"] %} {% if typename is scalar %} AddInput("{{attr | to_scalar_tensor_name}}", "attribute {{i}} for {{op_name}} op from 0D Tensor.") .AsDispensable(); AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.") {% elif typename == "IntArray" %}{# the type has been renamed #} {% if 'tensor_name' in attr or 'manual_flag' not in attr %} AddInput("{{attr | to_int_array_tensor_name}}", "attribute {{i}} for {{op_name}} op from 1D integer Tensor.") .AsDispensable(); {% endif %} {% if 'tensors_name' in attr or 'manual_flag' not in attr %} AddInput("{{attr | to_int_array_tensors_name}}", "attribute {{i}} for {{op_name}} op from list fo 0D integer Tensors.") .AsDuplicable() .AsDispensable(); {% endif %} AddAttr<{{attr["data_type"]}}>("{{name}}", "({{attr["data_type"]}}), attribute {{i}} for {{op_name}} op.") {% else %} AddAttr<{{typename | to_op_attr_type}}>("{{name}}", "({{typename | to_op_attr_type}}), attribute {{i}} for {{op_name}} op.") {% endif %} {% if "default_value" in attr %} .SetDefault({{process_default_value(attr)}}) {%- endif %} {% if "is_support_tensor" in attr and attr["is_support_tensor"] %} .SupportTensor() {%- endif %} {%- endmacro %} {# process default value for attributes, some attribute has different types and different default values in op & opmaker #} {% macro process_default_value(attr) %}{# inline #} {% set default_value = attr["default_value"] %} {% set typename = attr["typename"] %} {% if typename == "DataType" %}{# convert back to VarType #} {% if default_value == "DataType::UNDEFINED" %} -1 {%- else %} static_cast(framework::TransToProtoVarType(experimental::{{default_value}})) {%- endif %} {%- elif typename == "DataLayout" %} {# does DataLayout need any processing?#} static_cast(experimental::{{default_value}}) {%- elif typename == "Place" %}{# construct a Place to get the type #} static_cast(phi::Place({{"phi::" if not default_value is initializer_list}}{{default_value}}).GetType()) {%- else %}{# pass through as-is #} {{default_value}} {%- endif %} {%- endmacro %} {# --------------------------------------- name mapping ---------------------------------------------- #} {% macro name_map(op) %} /* ****************************************************************** NOTE: The following codes are for 'get_compat_kernel_signature.py' All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping: {{op | cartesian_prod_mapping}} ****************************************************************** */ KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const ArgumentMappingContext& ctx) { {% set kernel_args = op["kernel"]["param"] %} {{get_input_list(op["inputs"], kernel_args)}}; paddle::small_vector attrs; {% for attr in op["attrs"]%} {% filter indent(2)%} {{get_an_attr(attr, kernel_args)}} {% endfilter %} {% endfor %} {{get_output_list(op["outputs"], kernel_args)}}; {% if op["kernel"]["func"] | length == 1 %} KernelSignature sig("{{op["kernel"]["func"][0]}}", std::move(inputs), std::move(attrs), std::move(outputs)); return sig; {% else %}{# it has kernel for selected rows #} const char* kernel_name = ctx.IsSelectedRowsInput({{kernel_args[0] | to_opmaker_name_cstr}}) ? "{{op["kernel"]["func"][1]}}" : "{{op["kernel"]["func"][0]}}"; KernelSignature sig (kernel_name, std::move(inputs), std::move(attrs), std::move(outputs)); return sig; {%endif%} } {% endmacro %} {% macro get_kernel_dispatch(inputs, kernel_config) %}{# inline #} {%- for kernel_func in kernel_config["func"] %} {% set input_idx = namespace(idx=0) %} {% set kernel_in_type_list = kernel_config["dispatch"][kernel_func][0] %} if ( {%- for input in inputs %} {%- if input["fluid_name"] in kernel_config["param"] %} {%- if kernel_in_type_list[input_idx.idx] == "dense" %} ctx.IsDenseTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}} {%- elif kernel_in_type_list[input_idx.idx] == "selected_rows" %} ctx.IsSelectedRowsInput("{{input["fluid_name"]}}"){{" && " if not loop.last}} {%- elif kernel_in_type_list[input_idx.idx] == "sparse_coo" %} ctx.IsSparseCooTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}} {%- elif kernel_in_type_list[input_idx.idx] == "sparse_csr" %} ctx.IsSparseCsrTensorInput("{{input["fluid_name"]}}"){{" && " if not loop.last}} {%- endif %} {% set input_idx.idx = input_idx.idx + 1 %} {%- endif %} {%- endfor %}) { kernel_name = "{{kernel_func}}"; } {%- endfor %} {%- endmacro %} {% macro sparse_op_name_map(op) %} /* ****************************************************************** NOTE: The following codes are for 'get_compat_kernel_signature.py' All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping: {{op | cartesian_prod_mapping}} ****************************************************************** */ KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const ArgumentMappingContext& ctx) { {% set kernel_args = op["kernel"]["param"] %} {{get_input_list(op["inputs"], kernel_args)}}; paddle::small_vector attrs; {% for attr in op["attrs"]%} {% filter indent(2)%} {{get_an_attr(attr, kernel_args)}} {% endfilter %} {% endfor %} {{get_output_list(op["outputs"], kernel_args)}}; const char* kernel_name = "unregistered"; {{get_kernel_dispatch(op["inputs"], op["kernel"])}} KernelSignature sig (kernel_name, std::move(inputs), std::move(attrs), std::move(outputs)); return sig; } {% endmacro %} {% macro register_base_kernel_name(op) %} PD_REGISTER_BASE_KERNEL_NAME({{op["op_name"]}}, {{op["name"]}}); {%- endmacro %} {% macro register_name_map(op) %} PD_REGISTER_ARG_MAPPING_FN({{op["op_name"]}}, phi::{{op["op_name"] | to_pascal_case}}OpArgumentMapping); {%- endmacro %} {% macro get_input_list(inputs, kernel_args) %}{# inline #} paddle::small_vector inputs { {%- for input in inputs %} {%- if input["fluid_name"] in kernel_args %} {{input["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}} {%- endif %} {%- endfor %} } {%- endmacro %} {% macro get_an_attr(attr, kernel_args) %}{# inline #} {% set typename = attr["typename"] %} {%- if attr["fluid_name"] in kernel_args %} {% set name = attr["fluid_name"] %} {% if typename is scalar %}{# scalar correspond to a dispensable input and an attr in opmaker #} attrs.emplace_back(ctx.HasInput("{{attr | to_scalar_tensor_name}}") ? "{{attr | to_scalar_tensor_name}}" : "{{name}}"); {%- elif typename == "IntArray" %} {% if 'tensor_name' in attr and 'tensors_name' not in attr %} attrs.emplace_back( ctx.HasInput("{{attr | to_int_array_tensor_name}}") ? "{{attr | to_int_array_tensor_name}}" : "{{name}}"); {% elif 'tensor_name' not in attr and 'tensors_name' in attr %} attrs.emplace_back( ctx.InputSize("{{attr | to_int_array_tensors_name}}") > 0 ? "{{attr | to_int_array_tensors_name}}" : "{{name}}"); {% else %} attrs.emplace_back( ctx.HasInput("{{attr | to_int_array_tensor_name}}") ? "{{attr | to_int_array_tensor_name}}" : ctx.InputSize("{{attr | to_int_array_tensors_name}}") > 0 ? "{{attr | to_int_array_tensors_name}}" : "{{name}}"); {%- endif %} {%- else %} attrs.emplace_back("{{name}}"); {%- endif %} {%- endif %} {%- endmacro %} {% macro get_output_list(outputs, kernel_args) %}{# inline #} paddle::small_vector outputs { {%- for output in outputs %} {{output["fluid_name"] | to_opmaker_name_cstr}}{{", " if not loop.last}} {%- endfor %} } {%- endmacro %} {% macro get_expected_kernel(op) %} {% set kernel = op["kernel"] %} phi::KernelKey GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { {%if kernel["data_type"] is not none %}{# data type ---------------------------------#} {% if kernel["data_type"]["candidates"] | length == 1 %} {% set data_type_arg = kernel["data_type"]["candidates"][0] %} {% set inputs = op["inputs"] | map(attribute="fluid_name") | list %} {% if data_type_arg in inputs %} auto data_type = framework::OperatorWithKernel::IndicateVarDataType(ctx, {{data_type_arg | to_opmaker_name}}); {% if kernel["data_type"]["to_complex_flag"][0] %} data_type = framework::ToComplexType(data_type); {% endif %} {% else %}{# it is an attribute and probably named dtype#} auto data_type = framework::proto::VarType::Type(ctx.Attr("{{data_type_arg}}")); {% endif %} {% elif kernel["data_type"]["candidates"] | length == 2 %} {% set data_type_args = kernel["data_type"]["candidates"] %} auto data_type = framework::proto::VarType::Type(ctx.Attr("{{data_type_args[0]}}"); if (data_type == static_cast(-1)) { data_type = framework::OperatorWithKernel::IndicateVarDataType(ctx, {{data_type_args[1] | to_opmaker_name}}); } {% endif %} {% elif "complex_promote" in op and "forward" not in op%} {% set inputs = op["complex_promote"]%} auto data_type = OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "{{inputs[0]}}", "{{inputs[1]}}"); {% endif %} return phi::KernelKey(data_type, ctx.GetPlace()); } {% endmacro -%} {% macro get_kernel_for_var(op) %} {% set skip_args = none %} {% if op["data_transform"] is not none%} {% if "skip_transform" in op["data_transform"] %} {% set skip_args = op["data_transform"]["skip_transform"] %} {% elif "support_trans_dtype" in op["data_transform"] %} {% set skip_args = op["data_transform"]["support_trans_dtype"] %} {% endif %} {% endif %} {% set var_name = "var_name" -%} phi::KernelKey GetKernelTypeForVar( const std::string& {{var_name}}, const phi::DenseTensor& tensor, const phi::KernelKey& expected_kernel_type) const override { {%if skip_args is not none%}{# deal data_transform #} {% set skip_args_len = skip_args | length %} if ( {%- for skip_arg in skip_args -%} var_name == "{{ skip_arg }}" {%- if skip_args_len != 1 and loop.index != skip_args_len %} || {% endif -%} {%- endfor -%} ){ {% if "skip_transform" in op["data_transform"] %} return phi::KernelKey(phi::Backend::ALL_BACKEND, expected_kernel_type.layout(), expected_kernel_type.dtype()); {% elif "support_trans_dtype" in op["data_transform"] %} return phi::KernelKey(tensor.place(), tensor.layout(), tensor.dtype()); {% endif %} } {% else %}{# deal complex_promote #} if (framework::IsComplexType(expected_kernel_type.dtype())) { // only promote inputs’s types when contains complex input return phi::KernelKey(tensor.place(), tensor.layout(), tensor.dtype()); } {% endif %} else{ return phi::KernelKey( tensor.place(), tensor.layout(), expected_kernel_type.dtype()); } } {% endmacro %} {# --------------------------------------- operator ---------------------------------------------- #} {% macro operator(op) %} class {{op["op_name"] | to_pascal_case}}Op : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; {# ----------- get expected kernel type function -------------------------- #} {% set kernel = op["kernel"] %} {% if kernel["data_type"] is not none or "complex_promote" in op or "data_transform" in op%} protected: {% if kernel["data_type"] is not none or "complex_promote" in op %} {% filter indent(2, True)%} {{get_expected_kernel(op)}} {% endfilter %} {% endif %} {% endif %} {%- if "data_transform" in op and op["data_transform"] is not none -%} {% filter indent(2, True) %} {{get_kernel_for_var(op)}} {% endfilter %} {%- elif "complex_promote" in op and op["complex_promote"] is not none -%} {% filter indent(2, True) %} {{get_kernel_for_var(op)}} {% endfilter %} {%- endif %} }; DECLARE_INFER_SHAPE_FUNCTOR({{op["op_name"]}}, {{op["op_name"] | to_pascal_case}}InferShapeFunctor, PD_INFER_META(phi::{{op["infer_meta"]["func"]}})); {# inplace inferer #} {% if op["inplace"] is not none %} {% set inplace_map %} {% for source, target in op["inplace"].items() %} {{"{"}}{{target | to_opmaker_name}}, {{source | to_opmaker_name}}{{"}"}}{{", " if not loop.last}} {%- endfor %} {%- endset %} DECLARE_INPLACE_OP_INFERER({{op["op_name"] | to_pascal_case}}InplaceInferer, {{inplace_map}}); {% endif %} {# no_need_buffer inferer #} {% if op["no_need_buffer"] is not none %} DECLARE_NO_NEED_BUFFER_VARS_INFERER({{op["op_name"] | to_pascal_case}}NoNeedBufferVarInferer, {{op["no_need_buffer"] | map("to_opmaker_name") | join(", ")}}); {% endif %} {% endmacro%} {% macro register_op_with_components(op) %} {% set name = op["op_name"] %} REGISTER_OPERATOR({{name}}, ops::{{name | to_pascal_case}}Op, {% if not "forward" in op %}{# it is a forward op #} ops::{{name | to_pascal_case}}OpMaker, {% endif %} {% if "backward" in op and op["backward"] is not none %}{# backward #} {% set backward_name = op["backward"] %} ops::{{backward_name | to_pascal_case}}OpMaker, ops::{{backward_name | to_pascal_case}}OpMaker, {% else %} paddle::framework::EmptyGradOpMaker, paddle::framework::EmptyGradOpMaker, {% endif %} {% if op is supports_inplace %}{# inplace#} ops::{{name | to_pascal_case}}InplaceInferer, {% endif %} {% if "backward_composite" in op and op["backward_composite"] is not none %} ops::{{op["backward_composite"] | to_composite_grad_opmaker_name}}, {% endif %} {% if op is supports_no_need_buffer %}{# no_need_buffer #} ops::{{name | to_pascal_case}}NoNeedBufferVarInferer, {% endif %} ops::{{name | to_pascal_case}}InferShapeFunctor); {% endmacro %} {% macro register_op_version(op) %} {% if "version" in op %} {% set name = op["op_name"] %} REGISTER_OP_VERSION({{name}}) {% for checkpoint in op["version"]%} .AddCheckpoint( R"ROC({{checkpoint["checkpoint"]}})ROC", paddle::framework::compatible::OpVersionDesc() {% for action in checkpoint["action"]%} {% if "add_input" in action %} .NewInput("{{action["add_input"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "delete_input" in action %} .DeleteInput("{{action["delete_input"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "modify_input" in action %} .ModifyInput("{{action["modify_input"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "add_output" in action %} .NewOutput("{{action["add_output"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "delete_output" in action %} .DeleteOutput("{{action["delete_output"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "modify_output" in action %} .ModifyOutput("{{action["modify_output"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "add_attr" in action %} .NewAttr("{{action["add_attr"]}}", "{{action["comment"]}}", {{action["default"]}}){{")" if loop.last}} {% endif %} {% if "delete_attr" in action %} .DeleteAttr("{{action["delete_attr"]}}", "{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% if "fix_bug" in action %} .BugfixWithBehaviorChanged("{{action["comment"]}}"){{")" if loop.last}} {% endif %} {% endfor %} {% endfor %}; {% endif %} {% endmacro %} {# --------------------------------------- backward op maker ---------------------------------------------- #} {% macro backward_op_maker(op, forward_op ) %} {% set name = op["op_name"] %} {% set forward_input_names = op["forward"]["inputs"] | map(attribute="fluid_name") | list %} {% set forward_output_names = op["forward"]["outputs"] | map(attribute="fluid_name") | list %} {% set forward_attr_names = op["forward"]["attrs"] | map(attribute="fluid_name") | list %} {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %} {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %} {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %} template class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker { public: using framework::SingleGradOpMaker::SingleGradOpMaker; protected: void Apply(GradOpPtr grad_op) const override { grad_op->SetType("{{name}}"); {% for input in op["inputs"] %} grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward( input["fluid_name"], forward_input_names, forward_output_names, forward_input_orig_names, forward_output_orig_names)}}); {% endfor %} {% for output in op["outputs"] %} grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward( output["fluid_name"], forward_input_names, forward_output_names, forward_input_orig_names, forward_output_orig_names, output['drop_empty_grad'])}}); {% endfor %} grad_op->SetAttrMap(this->Attrs()); {% for attr in op["attrs"] %} {% set attr_name = attr["fluid_name"] %} {% if attr_name in forward_attr_names %} {% if attr["typename"] == "IntArray" %} {% if 'tensor_name' in attr or 'manual_flag' not in attr %} if (this->HasInput("{{attr | to_int_array_tensor_name}}")) { grad_op->SetInput("{{attr | to_int_array_tensor_name}}", this->Input("{{attr | to_int_array_tensor_name}}")); } {% endif %} {% if 'tensors_name' in attr or 'manual_flag' not in attr %} if (this->HasInput("{{attr | to_int_array_tensors_name}}")) { grad_op->SetInput("{{attr | to_int_array_tensors_name}}", this->Input("{{attr | to_int_array_tensors_name}}")); } {% endif %} {% elif attr["typename"] == "Scalar" %} if (this->HasInput("{{attr | to_scalar_tensor_name}}")) { grad_op->SetInput("{{attr | to_scalar_tensor_name}}", this->Input("{{attr | to_scalar_tensor_name}}")); } {% endif %} {% else %}{# maybe something wrong: backward op has more attrs than the forward one#} grad_op->SetAttr("{{attr_name}}", {{process_default_value(attr)}}); {% endif %} {% endfor %} } }; {% endmacro %} {% macro backward_op_reused_maker(bw_op, forward_op, invoke_op) %} {% set name = bw_op["op_name"] %} {% set forward_input_names = bw_op["forward"]["inputs"] | map(attribute="fluid_name") | list %} {% set forward_output_names = bw_op["forward"]["outputs"] | map(attribute="fluid_name") | list %} {% set forward_attr_names = bw_op["forward"]["attrs"] | map(attribute="fluid_name") | list %} {% set forward_input_orig_names = forward_op["inputs"] | map(attribute="fluid_name") | list %} {% set forward_output_orig_names = forward_op["outputs"] | map(attribute="fluid_name") | list %} {% set forward_attr_orig_names = forward_op["attrs"] | map(attribute="fluid_name") | list %} template class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker { public: using framework::SingleGradOpMaker::SingleGradOpMaker; protected: void Apply(GradOpPtr grad_op) const override { grad_op->SetType("{{invoke_op["func"]}}"); {% for input in invoke_op["inputs"] %} grad_op->SetInput({{input["fluid_name"] | to_opmaker_name}}, this->{{extract_input_from_forward( input["value"], forward_input_names, forward_output_names, forward_input_orig_names, forward_output_orig_names)}}); {% endfor %} {% for output in invoke_op["outputs"] %} grad_op->SetOutput({{output["fluid_name"] | to_opmaker_name}}, this->{{extract_output_from_forward( output["value"], forward_input_names, forward_output_names, forward_input_orig_names, forward_output_orig_names, true)}}); {% endfor %} {% for attr in invoke_op["attrs"] %} {% set attr_name = attr["fluid_name"] %} {% set fw_attrs = forward_op["attrs"] %} {% if attr_name in forward_attr_names %} {# invoke_op's attrs and fw_attr's attrs must be the same#} {% set fw_attr = fw_attrs[loop.index0] %} {% if fw_attr["typename"] == "IntArray" %} {% if 'tensor_name' in attr or 'manual_flag' not in attr %} if (this->HasInput("{{fw_attr | to_int_array_tensor_name}}")) { grad_op->SetInput("{{fw_attr | to_int_array_tensor_name}}", this->Input("{{fw_attr | to_int_array_tensor_name}}")); } {% endif %} {% if 'tensors_name' in fw_attr or 'manual_flag' not in fw_attr %} if (this->HasInput("{{fw_attr | to_int_array_tensors_name}}")) { grad_op->SetInput("{{fw_attr | to_int_array_tensors_name}}", this->Input("{{fw_attr | to_int_array_tensors_name}}")); } {% endif %} {% elif fw_attr["typename"] == "Scalar" %} if (this->HasInput("{{fw_attr | to_scalar_tensor_name}}")) { grad_op->SetInput("{{fw_attr | to_scalar_tensor_name}}", this->Input("{{fw_attr | to_scalar_tensor_name}}")); } {% endif %} {% endif %} {% endfor %} {% for attr in invoke_op["attrs"] %} grad_op->SetAttr("{{attr["fluid_name"]}}", {{attr["value"]}}); {% endfor %} } }; {% endmacro %} {% macro composite_grad_op_maker(backward_op) %} {% set op_name = backward_op["op_name"] %} {% set inputs = backward_op["inputs"] | to_variable_names("name")%} {% set input_dict = backward_op["input_dict"] %} {% set fluid_inputs = backward_op["inputs"] | to_variable_names("fluid_name")%} {% set forward_fluid_inputs = backward_op["forward"]["inputs"] | to_variable_names("fluid_name")%} {% set forward_fluid_outputs = backward_op["forward"]["outputs"] | to_variable_names("fluid_name")%} {% set attrs = backward_op["attrs"] | to_variable_names("name") %} {% set fluid_attrs = backward_op["attrs"] | to_variable_names("fluid_name") %} {% set attr_dict = backward_op["attr_dict"] %} {% set outputs = backward_op["outputs"] | to_variable_names("name")%} {% set output_dict = backward_op["output_dict"] %} {% set fluid_outputs = backward_op["outputs"] | to_variable_names("fluid_name")%} {% set composite_func_info = backward_op["composite"] %} class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradOpMakerBase { public: using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase; void Apply() override { //get inputs {{construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict)}} //get attr {{construct_composite_attr(attrs, fluid_attrs, attr_dict)}} //get output {{construct_composite_output(outputs, fluid_outputs, output_dict)}} //get output ptr {{construct_composite_output_ptr(outputs, output_dict)}} //get output orginal name {{get_composite_output_orginal_name(outputs, output_dict)}} //call composite backward func {{call_composite_backward_api(composite_func_info)}} //recover output name {{recover_composite_output_name(outputs)}} } }; {%- endmacro %} {% macro construct_composite_input(inputs, fluid_inputs, forward_fluid_inputs, forward_fluid_outputs, input_dict) %} {% set inputs_length = inputs | length %} {% for i in range(inputs_length) %} {% set input_typename = input_dict[inputs[i]]["typename"] %} {% set input_optional_flag = input_dict[inputs[i]]["optional"] %} {% if fluid_inputs[i] in forward_fluid_inputs %} {% if input_typename == "Tensor" %} {% if input_optional_flag == True %} auto {{inputs[i]}} = this->GetOptionalSingleForwardInput("{{fluid_inputs[i]}}"); {% else %} auto {{inputs[i]}} = this->GetSingleForwardInput("{{fluid_inputs[i]}}"); {% endif %} {% elif input_typename == "Tensor[]" %} {% if input_optional_flag == True %} auto {{inputs[i]}} = this->GetOptionalMultiForwardInput("{{fluid_inputs[i]}}"); {% else %} auto {{inputs[i]}} = this->GetMultiForwardInput("{{fluid_inputs[i]}}"); {% endif %} {% endif %} {% elif fluid_inputs[i] in forward_fluid_outputs %} {% if input_typename == "Tensor" %} {% if input_optional_flag == True %} auto {{inputs[i]}} = this->GetOptionalSingleForwardOutput("{{fluid_inputs[i]}}"); {% else %} auto {{inputs[i]}} = this->GetSingleForwardOutput("{{fluid_inputs[i]}}"); {% endif %} {% elif input_typename == "Tensor[]" %} {% if input_optional_flag == True %} auto {{inputs[i]}} = this->GetOptionalMultiForwardOutput("{{fluid_inputs[i]}}"); {% else %} auto {{inputs[i]}} = this->GetMultiForwardOutput("{{fluid_inputs[i]}}"); {% endif %} {% endif %} {% elif fluid_inputs[i][:-5] in forward_fluid_outputs %} {% if input_typename == "Tensor" %} {% if input_optional_flag == True %} auto {{inputs[i]}} = this->GetOptionalSingleOutputGrad("{{fluid_inputs[i][:-5]}}"); {% else %} auto {{inputs[i]}} = this->GetSingleOutputGrad("{{fluid_inputs[i][:-5]}}"); {% endif %} {% elif input_typename == "Tensor[]" %} {% if input_optional_flag == True %} auto {{inputs[i]}} = this->GetOptionalMultiOutputGrad("{{fluid_inputs[i][:-5]}}"); {% else %} auto {{inputs[i]}} = this->GetMultiOutputGrad("{{fluid_inputs[i][:-5]}}"); {%- endif %} {%- endif %} {%- endif %} {%- endfor %} {%- endmacro %} {% macro construct_composite_attr(attrs, fluid_attrs, attr_dict) %} {% set attrs_length = attrs | length %} {% for i in range(attrs_length) %} {% set attrs_data_type = attr_dict[attrs[i]]["typename"] | to_op_attr_type %} const {{attrs_data_type}} {{attrs[i]}} = this->Attr<{{attrs_data_type}}>("{{fluid_attrs[i]}}"); {% endfor %} {%- endmacro %} {% macro construct_composite_output(outputs, fluid_outputs, output_dict) %} {% set outputs_length = outputs | length %} {% for i in range(outputs_length) %} {% set output_typename = output_dict[outputs[i]]["typename"] %} {% if output_typename == "Tensor" %} auto {{outputs[i] + "_t"}} = this->GetSingleInputGrad("{{fluid_outputs[i][:-5]}}"); {% elif output_typename == "Tensor[]" %} auto {{outputs[i] + "_t"}} = this->GetMultiInputGrad("{{fluid_outputs[i][:-5]}}"); {%- endif %} {%- endfor %} {%- endmacro %} {% macro construct_composite_output_ptr(outputs, output_dict) %} {% set outputs_length = outputs | length %} {% for i in range(outputs_length) %} {% set output_typename = output_dict[outputs[i]]["typename"] %} {% if output_typename == "Tensor" %} auto {{outputs[i]}} = this->GetOutputPtr(&{{outputs[i]+ "_t"}}); {% elif output_typename == "Tensor[]" %} std::vector {{outputs[i]}}({{outputs[i] + "_t"}}.size()); for(size_t i = 0; i < {{outputs[i]}}.size(); ++i){ {{outputs[i]}}[i] = &{{outputs[i] + "_t"}}[i]; } {{outputs[i]}} = this->GetOutputPtr({{outputs[i]}}); {%- endif %} {%- endfor %} {%- endmacro %} {% macro get_composite_output_orginal_name(outputs, output_dict) %} {% set outputs_length = outputs | length %} {% for i in range(outputs_length) %} {% set output_typename = output_dict[outputs[i]]["typename"] %} {% if output_typename == "Tensor" %} auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}}); {% elif output_typename == "Tensor[]" %} auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}}); {%- endif %} {%- endfor %} {%- endmacro %} {% macro call_composite_backward_api(composite_func_info) %} VLOG(6) << "Runing {{composite_func_info["func_name"]}} composite func"; prim::{{composite_func_info["func_name"]}}({{composite_func_info["func_args"]}}); {%- endmacro %} {% macro recover_composite_output_name(outputs) %} {% set outputs_length = outputs | length %} {% for i in range(outputs_length) %} this->RecoverOutputName({{outputs[i] + "_t"}}, {{outputs[i] + "_name"}}); {% endfor %} {%- endmacro %} {% macro extract_input_from_forward(name, input_names, output_names, input_orig_names, output_orig_names) %}{# inline #} {% if name in input_names %} {% set name_in_forward_orig = input_orig_names[input_names.index(name)]%} Input({{name_in_forward_orig | to_opmaker_name}}) {%- elif name in output_names %} {% set name_in_forward_orig = output_orig_names[output_names.index(name)]%} Output({{name_in_forward_orig | to_opmaker_name}}) {%- elif name.endswith("_grad") %}{# output grad#} {% set name_in_forward = name[:-5] %} {% if name_in_forward in output_names %} {% set name_in_forward_orig = output_orig_names[output_names.index(name_in_forward)] %} OutputGrad({{name_in_forward_orig | to_opmaker_name}}) {%- endif %} {%- endif %} {%- endmacro %} {% macro extract_output_from_forward(name, input_names, output_names, input_orig_names, output_orig_names, drop_empty_grad) %}{# inline #} {% if name[:-5] in input_names %} {% set name_in_forward = name[:-5] %} {% set name_in_forward_orig = input_orig_names[input_names.index(name_in_forward)]%} {%- if drop_empty_grad is true -%} InputGrad({{name_in_forward_orig | to_opmaker_name}}) {%- else -%} InputGrad({{name_in_forward_orig | to_opmaker_name}}, false) {%- endif %} {%- elif (name) in input_names %} {% set name_in_forward_orig = input_orig_names[input_names.index(name)]%} Input({{name | to_opmaker_name}}) {%- endif %} {%- endmacro %} {% macro extract_attr_from_forward(name, attr_names, attr_origin_names) %} this->GetAttr("{{name}}") {%- endmacro %}