From 813e27c95a983754f0db622ecb305875a13c302c Mon Sep 17 00:00:00 2001 From: xiaoguoguo626807 <100397923+xiaoguoguo626807@users.noreply.github.com> Date: Tue, 17 Jan 2023 14:53:15 +0800 Subject: [PATCH] Prim api gen (#49654) * prototype of composite grad in paddle * prototype of composite grad in paddle * refactor composite api with phi * fix compile error * support static graph code-gen for squeeze op * generate static graph code of unsqueeze * refine op name * fix compile error * add extra output in op_compat * remove debug log * fix clang compile error * support prim switch flag * support prim switch flag * fix dygraph error * merge develop * add code_gen * add necessary files without codegen * fix code_gen bug * add deps * modify ignore * add ignore * delete std cout * add composite logic for backward.py * add tanh first order grad composite * support enable_prim flag for static graph * throw exception when both GradOpMaker and GradCompOpMaker have not been registered * reorganize the directory of prim api tests * fix windows error * add eager_utils * add eager_utils * modify code gen * add composite parse * add unittest for get_grad_op_desc * code optimize * fix static test on windows * support generate static graph code for imag and real op * fix windows compile error in test_static_prim * merge develop * disable test eager in inference * prim code gen * disable eager compile in inference * origin_yaml codegen success * rm other file * rm gitignore file * code_style * add eager test * code_style * clear # * merge develop * clear # * remove useless files * modify static test * support bool flag from singleton * merge develop * recover git ignore * fix conflict * clear prim_gen * recover git ignore for generated op * parse_yaml success * fix test compile error * remove some tests * add python test * code_style * revert parse_utils + clear prim_gen * fix some name issue * add composite code gen * modify backward yaml * fix static composite grad maker code gen * 
remove addtional files * add some static funcs unit test * fix some bugs * fix composite grad maker register code gen * optimize some functions * modify gen cmake * add more api gen * add header * modify static * add static expand unsqueeze * comments * modify compopmaker * revert * modify gen name Co-authored-by: JiabinYang <360788950@qq.com> Co-authored-by: zyfncg Co-authored-by: cxxly Co-authored-by: charles-hit --- paddle/fluid/prim/api/.gitignore | 3 + paddle/fluid/prim/api/CMakeLists.txt | 3 + paddle/fluid/prim/api/all.h | 2 +- .../api/auto_code_generated/CMakeLists.txt | 38 ++ .../prim/api/auto_code_generated/prim_base.py | 342 ++++++++++++++++++ .../prim/api/auto_code_generated/prim_gen.py | 132 +++++++ .../fluid/prim/api/generated/CMakeLists.txt | 1 + .../prim_api/CMakeLists.txt | 0 .../prim_api/static_prim_api.cc | 54 +-- paddle/fluid/prim/api/manual/CMakeLists.txt | 1 - .../manual/backward/composite_backward_api.h | 1 + .../api/manual/prim_api/eager_prim_api.cc | 81 ----- .../fluid/prim/api/manual/prim_api/prim_api.h | 53 +-- 13 files changed, 554 insertions(+), 157 deletions(-) create mode 100644 paddle/fluid/prim/api/.gitignore create mode 100644 paddle/fluid/prim/api/auto_code_generated/CMakeLists.txt create mode 100644 paddle/fluid/prim/api/auto_code_generated/prim_base.py create mode 100644 paddle/fluid/prim/api/auto_code_generated/prim_gen.py create mode 100644 paddle/fluid/prim/api/generated/CMakeLists.txt rename paddle/fluid/prim/api/{manual => generated}/prim_api/CMakeLists.txt (100%) rename paddle/fluid/prim/api/{manual => generated}/prim_api/static_prim_api.cc (91%) delete mode 100644 paddle/fluid/prim/api/manual/prim_api/eager_prim_api.cc diff --git a/paddle/fluid/prim/api/.gitignore b/paddle/fluid/prim/api/.gitignore new file mode 100644 index 0000000000..377e800f00 --- /dev/null +++ b/paddle/fluid/prim/api/.gitignore @@ -0,0 +1,3 @@ +generated/prim_api/eager_prim_api.cc +generated/prim_api/tmp_eager_prim_api.cc +generated/prim_api/*.h diff 
--git a/paddle/fluid/prim/api/CMakeLists.txt b/paddle/fluid/prim/api/CMakeLists.txt index 534ddec6b5..436cecc325 100644 --- a/paddle/fluid/prim/api/CMakeLists.txt +++ b/paddle/fluid/prim/api/CMakeLists.txt @@ -1,4 +1,7 @@ +add_subdirectory(auto_code_generated) add_subdirectory(manual) +add_subdirectory(generated) + if(NOT (NOT WITH_PYTHON AND ON_INFER)) cc_library( prim_api diff --git a/paddle/fluid/prim/api/all.h b/paddle/fluid/prim/api/all.h index 308eb91b4f..2996d2aa26 100644 --- a/paddle/fluid/prim/api/all.h +++ b/paddle/fluid/prim/api/all.h @@ -13,6 +13,6 @@ // limitations under the License. #pragma once +#include "paddle/fluid/prim/api/generated/prim_api/prim_api.h" #include "paddle/fluid/prim/api/manual/backward/composite_backward_api.h" -#include "paddle/fluid/prim/api/manual/prim_api/prim_api.h" #include "paddle/fluid/prim/api/manual/utils/utils.h" diff --git a/paddle/fluid/prim/api/auto_code_generated/CMakeLists.txt b/paddle/fluid/prim/api/auto_code_generated/CMakeLists.txt new file mode 100644 index 0000000000..e36af681bb --- /dev/null +++ b/paddle/fluid/prim/api/auto_code_generated/CMakeLists.txt @@ -0,0 +1,38 @@ +set(api_yaml_path + "${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/ops.parsed.yaml" +) +set(legacy_api_yaml_path + "${PADDLE_SOURCE_DIR}/paddle/fluid/operators/generator/parsed_ops/legacy_ops.parsed.yaml" +) +set(tmp_eager_prim_api_cc_path + "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/api/generated/prim_api/tmp_eager_prim_api.cc" +) +set(tmp_prim_api_h_path + "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/api/generated/prim_api/tmp_prim_api.h" +) +set(eager_prim_api_cc_path + "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/api/generated/prim_api/eager_prim_api.cc" +) +set(prim_api_h_path + "${PADDLE_SOURCE_DIR}/paddle/fluid/prim/api/generated/prim_api/prim_api.h") +set(prim_api_gen_file + ${PADDLE_SOURCE_DIR}/paddle/fluid/prim/api/auto_code_generated/prim_gen.py) + +message("prim api Code gen") +execute_process( + WORKING_DIRECTORY + 
${CMAKE_SOURCE_DIR}/paddle/fluid/prim/api/auto_code_generated + COMMAND + ${PYTHON_EXECUTABLE} ${prim_api_gen_file} --api_yaml_path + ${legacy_api_yaml_path} ${api_yaml_path} --prim_api_header_path + ${tmp_prim_api_h_path} --eager_prim_api_source_path + ${tmp_eager_prim_api_cc_path} + RESULT_VARIABLE _result) +if(${_result}) + message(FATAL_ERROR "prim api genrate failed, exiting.") +endif() +execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different + ${tmp_prim_api_h_path} ${prim_api_h_path}) +execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different + ${tmp_eager_prim_api_cc_path} ${eager_prim_api_cc_path}) +message("copy tmp_xxx_prim_api to xxx_prim_api") diff --git a/paddle/fluid/prim/api/auto_code_generated/prim_base.py b/paddle/fluid/prim/api/auto_code_generated/prim_base.py new file mode 100644 index 0000000000..d1ad94a7c3 --- /dev/null +++ b/paddle/fluid/prim/api/auto_code_generated/prim_base.py @@ -0,0 +1,342 @@ +# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# prim api list +white_ops_list = [ + "pow", + "scale", + "multiply", + "unsqueeze", + "expand", + "full", + "reshape", + "divide", + "sum", + "exp", +] + +inplace_out_type_map = { + "Tensor": "Tensor&", + "std::vector": "std::vector&", +} + +inplace_optional_out_type_map = { + "Tensor": "paddle::optional&", + "std::vector": "paddle::optional>&", +} + + +class BaseAPI: + def __init__(self, api_item_yaml): + # self.api = api_item_yaml['op'] + self.api = api_item_yaml['name'] + + self.is_prim_api = False + if api_item_yaml['name'] in white_ops_list: + self.is_prim_api = True + + ####################################### + # inputs: + # names : [], list of input names + # input_info : {input_name : type} + # attrs: + # names : [], list of attribute names + # attr_info : { attr_name : (type, default_values)} + # outputs: + # names : [], list of output names + # types : [], list of output types + # out_size_expr : [], expression for getting size of vector + ######################################## + if self.is_prim_api: + ( + self.inputs, + self.attrs, + self.outputs, + self.optional_vars, + ) = self.parse_args(self.api, api_item_yaml) + + self.inplace_map = api_item_yaml['inplace'] + + def get_api_func_name(self): + return self.api + + # def is_inplace(self): + # if self.inplace_map + # return True + # return False + + def get_input_tensor_args(self, inplace_flag=False): + input_args = [] + inplace_type_map = { + "const Tensor&": "Tensor&", + "const paddle::optional&": "paddle::optional&", + "const std::vector&": "std::vector&", + "const paddle::optional>&": "paddle::optional>&", + } + for name in self.inputs['names']: + name = name.split('@')[0] + if inplace_flag and name in self.inplace_map.values(): + input_args.append( + inplace_type_map[self.inputs['input_info'][name]] + + ' ' + + name + ) + else: + input_args.append(self.inputs['input_info'][name] + ' ' + name) + return input_args + + def get_declare_args(self, inplace_flag=False): + declare_args = 
self.get_input_tensor_args(inplace_flag) + for name in self.attrs['names']: + default_value = '' + if self.attrs['attr_info'][name][1] is not None: + default_value = ' = ' + self.attrs['attr_info'][name][1] + declare_args.append( + self.attrs['attr_info'][name][0] + ' ' + name + default_value + ) + + return ", ".join(declare_args) + + def get_declare_args_nodefault(self, inplace_flag=False): + declare_args = self.get_input_tensor_args(inplace_flag) + for name in self.attrs['names']: + declare_args.append(self.attrs['attr_info'][name][0] + ' ' + name) + + return ", ".join(declare_args) + + def get_return_type(self, inplace_flag=False): + out_type_list = [] + for i, out_type in enumerate(self.outputs['types']): + out_name = self.outputs['names'][i].split('@')[0] + if inplace_flag and out_name in self.inplace_map: + if self.inplace_map[out_name] in self.optional_vars: + out_type_list.append( + inplace_optional_out_type_map[out_type] + ) + else: + out_type_list.append(inplace_out_type_map[out_type]) + else: + out_type_list.append(out_type) + if len(out_type_list) == 1: + return out_type_list[0] + else: + return "std::tuple<" + ", ".join(out_type_list) + ">" + + def parse_args(self, api_name, api_item_yaml): + optional_vars = [] + for input_dict in api_item_yaml['inputs']: + if input_dict['optional']: + optional_vars.append(input_dict['name']) + + inputs, attrs = self.parse_input_and_attr( + api_item_yaml['inputs'], api_item_yaml['attrs'] + ) + + output_type_list, output_names, out_size_expr = self.parse_output( + api_item_yaml['outputs'] + ) + return ( + inputs, + attrs, + { + 'names': output_names, + 'types': output_type_list, + 'out_size_expr': out_size_expr, + }, + optional_vars, + ) + + def parse_input_and_attr(self, inputs_list, attrs_list): + input_types_map = { + 'Tensor': 'const Tensor&', + 'Tensor[]': 'const std::vector&', + } + attr_types_map = { + 'IntArray': 'const IntArray&', + 'Scalar': 'const Scalar&', + 'Scalar(int)': 'const Scalar&', + 
'Scalar(int64_t)': 'const Scalar&', + 'Scalar(float)': 'const Scalar&', + 'Scalar(dobule)': 'const Scalar&', + 'Scalar[]': 'const std::vector&', + 'int': 'int', + 'int32_t': 'int32_t', + 'int64_t': 'int64_t', + 'long': 'long', + 'size_t': 'size_t', + 'float': 'float', + 'float[]': 'const std::vector&', + 'double': 'double', + 'bool': 'bool', + 'bool[]': 'const std::vector&', + 'str': 'const std::string&', + 'str[]': 'const std::vector&', + 'Place': 'const Place&', + 'DataLayout': 'DataLayout', + 'DataType': 'DataType', + 'int64_t[]': 'const std::vector&', + 'int[]': 'const std::vector&', + } + optional_types_trans = { + 'Tensor': 'const paddle::optional&', + 'Tensor[]': 'const paddle::optional>&', + 'int': 'paddle::optional', + 'int32_t': 'paddle::optional', + 'int64_t': 'paddle::optional', + 'float': 'paddle::optional', + 'double': 'paddle::optional', + 'bool': 'paddle::optional', + 'Place': 'paddle::optional', + 'DataLayout': 'paddle::optional', + 'DataType': 'paddle::optional', + } + + inputs = {'names': [], 'input_info': {}} + for input_dict in inputs_list: + inputs['names'].append(input_dict['name']) + if input_dict['optional']: + inputs['input_info'][input_dict['name']] = optional_types_trans[ + input_dict['typename'] + ] + else: + inputs['input_info'][input_dict['name']] = input_types_map[ + input_dict['typename'] + ] + + attrs = {'names': [], 'attr_info': {}} + for attr_dict in attrs_list: + attrs['names'].append(attr_dict['name']) + if 'default_value' in attr_dict.keys(): + default_value = attr_dict['default_value'] + else: + default_value = None + + if 'optional' in attr_dict.keys(): + attrs['attr_info'][attr_dict['name']] = ( + optional_types_trans[attr_dict['typename']], + default_value, + ) + else: + attrs['attr_info'][attr_dict['name']] = ( + attr_types_map[attr_dict['typename']], + default_value, + ) + return inputs, attrs + + def parse_output(self, outputs_list): + + out_type_list = [] + out_name_list = [] + out_size_expr_list = [] + for output_dict 
in outputs_list: + if output_dict['intermediate']: + continue + out_type_list.append(output_dict['typename']) + out_name_list.append(output_dict['name']) + if 'size' in output_dict.keys(): + out_size_expr_list.append(output_dict['size']) + else: + out_size_expr_list.append(None) + return out_type_list, out_name_list, out_size_expr_list + + +class EagerPrimAPI(BaseAPI): + def __init__(self, api_item_yaml): + super().__init__(api_item_yaml) + + def get_api__func_name(self): + api_func_name = self.api + # if self.is_inplace: + # if api_func_name[-1] != '_': + # api_func_name += '_' + # print("after api name", api_func_name) + return api_func_name + + def gene_prim_api_declaration(self): + api_declaration = "" + api_func_name = self.get_api__func_name() + if api_func_name[-1] != '_': + api_declaration = f""" +template +{self.get_return_type()} {api_func_name}({self.get_declare_args()}); +""" + else: + api_declaration = ( + api_declaration + + f""" +template +{self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_declare_args(inplace_flag=True)}); +""" + ) + + return api_declaration + + def get_ad_func_input_args(self, inplace_flag=False): + input_args = [] + for name in self.inputs['names']: + name = name.split('@')[0] + if inplace_flag and name in self.inplace_map.values(): + input_args.append(name) + else: + input_args.append(name) + return input_args + + def get_ad_func_args(self, inplace_flag=False): + ad_func_args = self.get_ad_func_input_args(inplace_flag) + for name in self.attrs['names']: + default_value = '' + if self.attrs['attr_info'][name][1] is not None: + default_value = ' = ' + self.attrs['attr_info'][name][1] + ad_func_args.append(name) + + ad_func_args_str = ", ".join(ad_func_args) + return ad_func_args_str + + def gene_ad_func_call(self): + api_func_name = self.get_api__func_name() + + dygraph_ad_func_name = '::' + api_func_name + '_ad_func' + dygraph_ad_func_parameters = self.get_ad_func_args() + + ad_func_call_str = f""" +VLOG(4) << 
"Eager Prim API {api_func_name}_ad_func call"; +return {dygraph_ad_func_name}({dygraph_ad_func_parameters}); +""" + # print("ad_func_call_str: ", ad_func_call_str) + return ad_func_call_str + + def gene_eager_prim_api_code(self): + api_code = "" + indent = " " + api_func_name = self.get_api__func_name() + template = '' + # func decalaration + if api_func_name[-1] != '_': + api_code = f""" +template <> +{self.get_return_type()} {api_func_name}{template}({self.get_declare_args_nodefault()}) +""" + else: + api_code = f""" +template <> +{self.get_return_type(inplace_flag=True)} {api_func_name}{template}({self.get_declare_args_nodefault(inplace_flag=True)}) +""" + # func code + + api_code = api_code + '{' + api_code += f"""{self.gene_ad_func_call()}""" + api_code += '}' + '\n' + + return api_code diff --git a/paddle/fluid/prim/api/auto_code_generated/prim_gen.py b/paddle/fluid/prim/api/auto_code_generated/prim_gen.py new file mode 100644 index 0000000000..7bc59df4f3 --- /dev/null +++ b/paddle/fluid/prim/api/auto_code_generated/prim_gen.py @@ -0,0 +1,132 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +import yaml +from prim_base import EagerPrimAPI + + +def header_include(): + return """ +#include "paddle/phi/common/int_array.h" +#include "paddle/phi/common/data_type.h" +#include "paddle/phi/common/scalar.h" +#include "paddle/phi/common/place.h" +#include "paddle/utils/optional.h" +""" + + +def eager_source_include(header_file_path): + return """ +#include "paddle/fluid/eager/api/all.h" +#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h" +#include "paddle/fluid/prim/api/generated/prim_api/prim_api.h" +""" + + +def api_namespace(): + return ( + """ +namespace paddle { +namespace prim { +""", + """ +using Tensor = paddle::experimental::Tensor; +using Scalar = paddle::experimental::Scalar; +using IntArray = paddle::experimental::IntArray; +using DataType = paddle::experimental::DataType; +""", + """ +} // namespace prim +} // namespace paddle +""", + ) + + +def generate_api(api_yaml_path, header_file_path, eager_prim_source_file_path): + apis = [] + + for each_api_yaml in api_yaml_path: + with open(each_api_yaml, 'r') as f: + api_list = yaml.load(f, Loader=yaml.FullLoader) + if api_list: + apis.extend(api_list) + + header_file = open(header_file_path, 'w') + eager_prim_source_file = open(eager_prim_source_file_path, 'w') + + namespace = api_namespace() + + header_file.write("#pragma once\n") + header_file.write(header_include()) + header_file.write(namespace[0]) + header_file.write(namespace[1]) + include_header_file = ( + "#include paddle/fluid/prim/api/generated/prim_api/prim_api.h" + ) + eager_prim_source_file.write(eager_source_include(include_header_file)) + eager_prim_source_file.write(namespace[0]) + + for api in apis: + prim_api = EagerPrimAPI(api) + if prim_api.is_prim_api: + header_file.write(prim_api.gene_prim_api_declaration()) + eager_prim_source_file.write(prim_api.gene_eager_prim_api_code()) + + header_file.write(namespace[2]) + eager_prim_source_file.write(namespace[2]) + + 
header_file.close() + eager_prim_source_file.close() + + +def main(): + parser = argparse.ArgumentParser( + description='Generate PaddlePaddle C++ API files' + ) + parser.add_argument( + '--api_yaml_path', + help='path to api yaml file', + nargs='+', + default=['paddle/phi/api/yaml/ops.yaml'], + ) + + parser.add_argument( + '--prim_api_header_path', + help='output of generated prim_api header code file', + default='paddle/fluid/prim/api/generated/prim_api/prim_api.h', + ) + + parser.add_argument( + '--eager_prim_api_source_path', + help='output of generated eager_prim_api source code file', + default='paddle/fluid/prim/api/generated/prim_api/eager_prim_api.cc', + ) + + options = parser.parse_args() + + api_yaml_path = options.api_yaml_path + prim_api_header_file_path = options.prim_api_header_path + eager_prim_api_source_file_path = options.eager_prim_api_source_path + + generate_api( + api_yaml_path, + prim_api_header_file_path, + eager_prim_api_source_file_path, + ) + + +if __name__ == '__main__': + main() diff --git a/paddle/fluid/prim/api/generated/CMakeLists.txt b/paddle/fluid/prim/api/generated/CMakeLists.txt new file mode 100644 index 0000000000..a1b75527c2 --- /dev/null +++ b/paddle/fluid/prim/api/generated/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(prim_api) diff --git a/paddle/fluid/prim/api/manual/prim_api/CMakeLists.txt b/paddle/fluid/prim/api/generated/prim_api/CMakeLists.txt similarity index 100% rename from paddle/fluid/prim/api/manual/prim_api/CMakeLists.txt rename to paddle/fluid/prim/api/generated/prim_api/CMakeLists.txt diff --git a/paddle/fluid/prim/api/manual/prim_api/static_prim_api.cc b/paddle/fluid/prim/api/generated/prim_api/static_prim_api.cc similarity index 91% rename from paddle/fluid/prim/api/manual/prim_api/static_prim_api.cc rename to paddle/fluid/prim/api/generated/prim_api/static_prim_api.cc index 62854061ef..fd309750ed 100644 --- a/paddle/fluid/prim/api/manual/prim_api/static_prim_api.cc +++ 
b/paddle/fluid/prim/api/generated/prim_api/static_prim_api.cc @@ -25,6 +25,7 @@ #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/convert_utils.h" +#include "paddle/fluid/prim/api/generated/prim_api/prim_api.h" #include "paddle/fluid/prim/api/manual/prim_api/prim_api.h" #include "paddle/fluid/prim/api/manual/utils/utils.h" #include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h" @@ -37,7 +38,7 @@ namespace paddle { namespace prim { template <> -Tensor pow(const Tensor& x, const paddle::experimental::Scalar& y) { +Tensor pow(const Tensor& x, const Scalar& y) { Tensor out = empty({}, phi::DataType::FLOAT32, paddle::Place()); framework::BlockDesc* block = StaticCompositeContext::Instance().GetBlock(); framework::OpDesc* op = block->AppendOp(); @@ -55,7 +56,7 @@ Tensor pow(const Tensor& x, const paddle::experimental::Scalar& y) { template <> Tensor scale(const Tensor& x, - const paddle::experimental::Scalar& scale, + const Scalar& scale, float bias, bool bias_after_scale) { Tensor out = empty({}, phi::DataType::FLOAT32, paddle::Place()); @@ -95,63 +96,63 @@ Tensor multiply(const Tensor& x, const Tensor& y) { } template <> -Tensor expand(const Tensor& x, const IntArray& shape) { +Tensor unsqueeze(const Tensor& x, const IntArray& axis) { Tensor out = empty({}, phi::DataType::FLOAT32, paddle::Place()); framework::BlockDesc* block = StaticCompositeContext::Instance().GetBlock(); framework::OpDesc* op = block->AppendOp(); - op->SetType("expand_v2"); + op->SetType("unsqueeze2"); op->SetInput("X", {std::static_pointer_cast(x.impl())->Name()}); op->SetOutput( "Out", {std::static_pointer_cast(out.impl())->Name()}); - std::vector new_shape(shape.GetData().begin(), shape.GetData().end()); - op->SetAttr("shape", new_shape); + std::vector new_shape(axis.GetData().begin(), axis.GetData().end()); + op->SetAttr("axes", new_shape); op->CheckAttrs(); op->InferVarType(block); return out; } template <> -Tensor divide(const Tensor& x, const 
Tensor& y) { - // Grad infershape +Tensor expand(const Tensor& x, const IntArray& shape) { Tensor out = empty({}, phi::DataType::FLOAT32, paddle::Place()); framework::BlockDesc* block = StaticCompositeContext::Instance().GetBlock(); framework::OpDesc* op = block->AppendOp(); - op->SetType("elementwise_div"); + op->SetType("expand_v2"); op->SetInput("X", {std::static_pointer_cast(x.impl())->Name()}); - op->SetInput("Y", - {std::static_pointer_cast(y.impl())->Name()}); op->SetOutput( "Out", {std::static_pointer_cast(out.impl())->Name()}); + std::vector new_shape(shape.GetData().begin(), shape.GetData().end()); + op->SetAttr("shape", new_shape); op->CheckAttrs(); op->InferVarType(block); - op->InferShape(*block); return out; } template <> -Tensor unsqueeze(const Tensor& x, const IntArray& axis) { +Tensor divide(const Tensor& x, const Tensor& y) { + // Grad infershape Tensor out = empty({}, phi::DataType::FLOAT32, paddle::Place()); framework::BlockDesc* block = StaticCompositeContext::Instance().GetBlock(); framework::OpDesc* op = block->AppendOp(); - op->SetType("unsqueeze2"); + op->SetType("elementwise_div"); op->SetInput("X", {std::static_pointer_cast(x.impl())->Name()}); + op->SetInput("Y", + {std::static_pointer_cast(y.impl())->Name()}); op->SetOutput( "Out", {std::static_pointer_cast(out.impl())->Name()}); - std::vector new_shape(axis.GetData().begin(), axis.GetData().end()); - op->SetAttr("axes", new_shape); op->CheckAttrs(); op->InferVarType(block); + op->InferShape(*block); return out; } template <> -Tensor full(paddle::experimental::IntArray shape, - paddle::experimental::Scalar value, - paddle::experimental::DataType dtype, - paddle::platform::Place place) { +Tensor full(const IntArray& shape, + const Scalar& value, + DataType dtype, + const Place& place) { // Grad infershape Tensor out = empty({}, dtype, place); framework::BlockDesc* block = StaticCompositeContext::Instance().GetBlock(); @@ -159,9 +160,8 @@ Tensor full(paddle::experimental::IntArray shape, 
op->SetType("fill_constant"); op->SetAttr("shape", shape.GetData()); PADDLE_ENFORCE_EQ( - ((dtype == paddle::experimental::DataType::FLOAT32) || - (dtype == paddle::experimental::DataType::FLOAT64) || - (dtype == paddle::experimental::DataType::FLOAT16)), + ((dtype == DataType::FLOAT32) || (dtype == DataType::FLOAT64) || + (dtype == DataType::FLOAT16)), true, phi::errors::InvalidArgument( "We only support float32/float16 for full, but we got data type: %s", @@ -177,9 +177,9 @@ Tensor full(paddle::experimental::IntArray shape, } template <> -Tensor sum(Tensor x, - paddle::experimental::IntArray axis, - paddle::experimental::DataType dtype, +Tensor sum(const Tensor& x, + const IntArray& axis, + DataType dtype, bool keepdim) { // Grad infershape Tensor out = empty({}, dtype, paddle::Place()); @@ -204,7 +204,7 @@ Tensor sum(Tensor x, } template <> -Tensor reshape(Tensor x, paddle::experimental::IntArray shape) { +Tensor reshape(const Tensor& x, const IntArray& shape) { // Grad infershape Tensor out = empty({}, x.dtype(), paddle::Place()); framework::BlockDesc* block = StaticCompositeContext::Instance().GetBlock(); diff --git a/paddle/fluid/prim/api/manual/CMakeLists.txt b/paddle/fluid/prim/api/manual/CMakeLists.txt index 261f6dd486..512d2b1553 100644 --- a/paddle/fluid/prim/api/manual/CMakeLists.txt +++ b/paddle/fluid/prim/api/manual/CMakeLists.txt @@ -1,2 +1 @@ -add_subdirectory(prim_api) add_subdirectory(utils) diff --git a/paddle/fluid/prim/api/manual/backward/composite_backward_api.h b/paddle/fluid/prim/api/manual/backward/composite_backward_api.h index 4ededb74f3..6ddec5b4e2 100644 --- a/paddle/fluid/prim/api/manual/backward/composite_backward_api.h +++ b/paddle/fluid/prim/api/manual/backward/composite_backward_api.h @@ -13,6 +13,7 @@ // limitations under the License. 
#pragma once +#include "paddle/fluid/prim/api/generated/prim_api/prim_api.h" #include "paddle/fluid/prim/api/manual/prim_api/prim_api.h" #include "paddle/fluid/prim/api/manual/utils/utils.h" #include "paddle/phi/common/int_array.h" diff --git a/paddle/fluid/prim/api/manual/prim_api/eager_prim_api.cc b/paddle/fluid/prim/api/manual/prim_api/eager_prim_api.cc deleted file mode 100644 index fa6e2f4277..0000000000 --- a/paddle/fluid/prim/api/manual/prim_api/eager_prim_api.cc +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "paddle/fluid/eager/api/all.h" -#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h" -#include "paddle/fluid/prim/api/manual/prim_api/prim_api.h" -#include "paddle/phi/capi/include/wrapper_base.h" -namespace paddle { -namespace prim { -template <> -Tensor pow(const Tensor& x, const paddle::experimental::Scalar& y) { - return ::pow_ad_func(x, y); -} - -template <> -Tensor scale(const Tensor& x, - const paddle::experimental::Scalar& scale, - float bias, - bool bias_after_scale) { - return ::scale_ad_func(x, scale, bias, bias_after_scale); -} - -template <> -Tensor multiply(const Tensor& x, const Tensor& y) { - return ::multiply_ad_func(x, y); -} - -template <> -Tensor expand(const Tensor& x, const IntArray& shape) { - return ::expand_ad_func(x, shape); -} - -template <> -Tensor unsqueeze(const Tensor& x, const IntArray& axis) { - return ::unsqueeze_ad_func(x, axis); -} - -template <> -Tensor divide(const Tensor& x, const Tensor& y) { - return ::divide_ad_func(x, y); -} - -template <> -Tensor full(paddle::experimental::IntArray shape, - paddle::experimental::Scalar value, - paddle::experimental::DataType dtype, - paddle::platform::Place place) { - return ::full_ad_func(shape, value, dtype, place); -} -template <> -Tensor sum(Tensor x, IntArray axis, DataType dtype, bool keepdim) { - return ::sum_ad_func(x, axis, dtype, keepdim); -} - -template <> -Tensor reshape(Tensor x, IntArray shape) { - return ::reshape_ad_func(x, shape); -} - -template <> -Tensor exp(const Tensor& x) { - return ::exp_ad_func(x); -} - -template -Tensor expand(const Tensor& x, const IntArray& shape) { - return ::expand_ad_func(x, shape); -} -} // namespace prim -} // namespace paddle diff --git a/paddle/fluid/prim/api/manual/prim_api/prim_api.h b/paddle/fluid/prim/api/manual/prim_api/prim_api.h index c7edf80a2f..65d411d863 100644 --- a/paddle/fluid/prim/api/manual/prim_api/prim_api.h +++ b/paddle/fluid/prim/api/manual/prim_api/prim_api.h @@ -12,56 
+12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +// prim api which can't be generated #pragma once + +#include "paddle/phi/common/data_type.h" #include "paddle/phi/common/int_array.h" +#include "paddle/phi/common/place.h" #include "paddle/phi/common/scalar.h" #include "paddle/utils/optional.h" -namespace paddle { -namespace prim { -using Tensor = paddle::experimental::Tensor; -using IntArray = paddle::experimental::IntArray; -using Scalar = paddle::experimental::Scalar; - -template -Tensor pow(const Tensor& x, const Scalar& y); - -template -Tensor scale(const Tensor& X, - const Scalar& scale, - float bias, - bool bias_after_scale); - -template -Tensor multiply(const Tensor& x, const Tensor& y); - -template -Tensor expand(const Tensor& x, const IntArray& shape); - -template -Tensor unsqueeze(const Tensor& x, const IntArray& axis); -template -Tensor divide(const Tensor& x, const Tensor& y); - -template -Tensor full(IntArray shape, - Scalar value, - DataType dtype = DataType::FLOAT32, - Place place = CPUPlace()); - -template -Tensor sum(Tensor x, - IntArray axis = {}, - DataType dtype = DataType::UNDEFINED, - bool keepdim = false); - -template -Tensor reshape(Tensor x, IntArray shape); - -template -Tensor expand(const Tensor& x, const IntArray& shape); - -template -Tensor exp(const Tensor& x); -} // namespace prim +namespace paddle { +namespace prim {} // namespace prim } // namespace paddle -- GitLab