/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "paddle/phi/api/ext/exception.h"
#include "paddle/phi/api/include/dll_decl.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/utils/any.h"
#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

/**
 * Op Meta Info Related Definitions.
 *
 * Used to maintain operator core information.
 */

namespace paddle {

class PADDLE_API OpMetaInfoHelper;
using Tensor = paddle::Tensor;

///////////////// Util Macro Define ////////////////

#define PD_DISABLE_COPY_AND_ASSIGN(classname)      \
 private:                                          \
  classname(const classname&) = delete;            \
  classname(classname&&) = delete;                 \
  classname& operator=(const classname&) = delete; \
  classname& operator=(classname&&) = delete

#define STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg)                        \
  struct __test_global_namespace_##uniq_name##__ {};                          \
  static_assert(std::is_same<::__test_global_namespace_##uniq_name##__,       \
                             __test_global_namespace_##uniq_name##__>::value, \
                msg)

///////////////// Util Define and Function ////////////////

constexpr char kGradTensorSuffix[] = "@GRAD";
constexpr char kTensorVectorSuffix[] = "@VECTOR";
constexpr char kDoubleGradNewOutSuffix[] = "@NEW";
constexpr char kOptionalSuffix[] = "@OPTIONAL";

// Used to construct a grad Tensor name
inline std::string Grad(const std::string& t_name) {
  std::string result;
  result.reserve(t_name.size() + 5U);
  result += t_name;
  result += kGradTensorSuffix;
  return result;
}

// Used to construct a std::vector<Tensor> name
inline std::string Vec(const std::string& t_name) {
  std::string result;
  result.reserve(t_name.size() + 7U);
  result += t_name;
  result += kTensorVectorSuffix;
  return result;
}

// Used to construct a double-grad output name
inline std::string New(const std::string& t_name) {
  std::string result;
  result.reserve(t_name.size() + 4U);
  result += t_name;
  result += kDoubleGradNewOutSuffix;
  return result;
}

// Used to construct a paddle::optional<Tensor> name
inline std::string Optional(const std::string& t_name) {
  std::string result;
  result.reserve(t_name.size() + 9U);
  result += t_name;
  result += kOptionalSuffix;
  return result;
}

PADDLE_API void AssignTensorImpl(const Tensor& src, Tensor* dst);
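
// Illustrative examples of how these helpers compose (the names "X", "Out"
// and "Bias" are hypothetical; the resulting strings follow directly from the
// suffix constants above):
//
//   Grad("X")        -> "X@GRAD"
//   Vec("X")         -> "X@VECTOR"
//   Grad(Vec("X"))   -> "X@VECTOR@GRAD"
//   New(Grad("Out")) -> "Out@GRAD@NEW"
//   Optional("Bias") -> "Bias@OPTIONAL"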

////////////////////// Kernel Context ////////////////////////

class PADDLE_API CustomOpKernelContext {
 public:
  CustomOpKernelContext() = default;

  void EmplaceBackInput(Tensor&& input);
  void EmplaceBackInputs(const std::vector<Tensor>& inputs);
  void EmplaceBackOutput(Tensor&& output);
  void EmplaceBackOutputs(const std::vector<Tensor>& outputs);
  void EmplaceBackAttr(paddle::any attr);
  void EmplaceBackAttrs(const std::vector<paddle::any>& attrs) {
    attrs_ = attrs;
  }
  const std::pair<size_t, size_t>& InputRangeAt(size_t idx) const;
  const std::pair<size_t, size_t>& OutputRangeAt(size_t idx) const;

  const Tensor& InputAt(size_t idx) const;
  std::vector<Tensor> InputsBetween(size_t start, size_t end) const;
  Tensor& MutableInputAt(size_t idx);
  paddle::optional<Tensor> OptionalInputAt(size_t idx);
  paddle::optional<std::vector<Tensor>> OptionalInputsBetween(size_t start,
                                                              size_t end);
  const std::vector<paddle::any>& Attrs() const { return attrs_; }
  const std::vector<std::pair<size_t, size_t>>& InputRange() {
    return input_range_;
  }
  const std::vector<std::pair<size_t, size_t>>& OutputRange() {
    return output_range_;
  }
  Tensor* MutableOutputAt(size_t idx);
  std::vector<Tensor*> MutableOutputBetweeen(size_t start, size_t end);
  std::vector<Tensor> OutputsBetweeen(size_t start, size_t end);
  std::vector<Tensor>* AllMutableOutput();

  template <typename AttrType>
  AttrType AttrAt(size_t idx) const {
    try {
      return paddle::any_cast<AttrType>(attrs_.at(idx));
    } catch (paddle::bad_any_cast&) {
      PD_THROW("Attribute cast error in Custom Op Kernel Context.");
    }
  }

  // handle inplace map
  void MapPlainOutputs(
      const std::vector<std::string>& inputs,
      const std::vector<std::string>& outputs,
      const std::unordered_map<std::string, std::string>& inplace_map);
  void AssignInplaceOutputs();
  std::vector<Tensor*>* AllMutablePlainOutput();
  std::unordered_map<size_t, size_t> GetInplaceTensorMap();

 private:
  // TODO(chenweihang): replace with SmallVector
  std::vector<Tensor> inputs_;
  std::vector<Tensor> outputs_;
  std::vector<paddle::any> attrs_;
  // handle inplace map
  std::vector<Tensor*> plain_outputs_;
  std::unordered_map<size_t, size_t> inplace_tensor_map_;

  std::vector<std::pair<size_t, size_t>> input_range_;
  std::vector<std::pair<size_t, size_t>> output_range_;
};
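
// A minimal sketch of how a call is expected to be staged through this
// context. Illustrative only -- the real call sites live in the custom op
// loading/execution code, not in this header, and `x` is a hypothetical
// input Tensor:
//
//   paddle::CustomOpKernelContext ctx;
//   ctx.EmplaceBackInput(std::move(x));       // one entry per declared input
//   ctx.EmplaceBackAttr(paddle::any(0.5f));   // attrs in declaration order
//   ctx.EmplaceBackOutput(paddle::Tensor());  // pre-created outputs
//   kernel_fn(&ctx);  // a KernelFunc produced by PD_KERNEL(...), see below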

////////////////////// Kernel Function (PD_KERNEL) ////////////////////////

// Record Op kernel core function
using KernelFunc = void (*)(CustomOpKernelContext*);

#define PD_SPECIALIZE_ComputeCallHelper(attr_type)                            \
  template <typename... Tail>                                                 \
  struct ComputeCallHelper<attr_type, Tail...> {                              \
    template <int in_idx,                                                     \
              int attr_idx,                                                   \
              int out_idx,                                                    \
              typename... PreviousArgs>                                       \
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) { \
      attr_type arg = ctx->AttrAt<attr_type>(attr_idx);                       \
      ComputeCallHelper<                                                      \
          Tail...>::template Compute<in_idx, attr_idx + 1, out_idx>(          \
          ctx, pargs..., arg);                                                \
    }                                                                         \
  }

template <typename T>
struct TypeTag {};

// Unpacks a CustomOpKernelContext into a typed kernel call: each
// ComputeCallHelper specialization consumes one kernel parameter and advances
// the matching index (in_idx for inputs, attr_idx for attributes, out_idx for
// outputs) until the TypeTag<int> sentinel is reached.
template <typename F, F f>
struct KernelFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  static void Compute(CustomOpKernelContext* ctx) {
    ComputeCallHelper<Args..., TypeTag<int>>::template Compute<0, 0, 0>(ctx);
  }

 private:
  template <typename... RemainingArgs>
  struct ComputeCallHelper;

  // Handle args for general Tensor input case
  template <typename... Tail>
  struct ComputeCallHelper<const Tensor&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto& arg = ctx->MutableInputAt(range.first);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  // Handle args for optional Tensor input case
  template <typename... Tail>
  struct ComputeCallHelper<const paddle::optional<paddle::Tensor>&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto& arg = ctx->InputAt(range.first);
      if (!arg.is_initialized()) {
        ComputeCallHelper<Tail...>::
            template Compute<in_idx + 1, attr_idx, out_idx>(
                ctx, pargs..., paddle::none);
      } else {
        ComputeCallHelper<
            Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
            ctx, pargs..., arg);
      }
    }
  };

  // Handle args for inplace Tensor case
  template <typename... Tail>
  struct ComputeCallHelper<Tensor&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto& arg = ctx->MutableInputAt(range.first);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  // Handle args for optional inplace Tensor input case
  template <typename... Tail>
  struct ComputeCallHelper<paddle::optional<Tensor>&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto arg = ctx->OptionalInputAt(range.first);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  // Handle args for general std::vector<Tensor> input case
  template <typename... Tail>
  struct ComputeCallHelper<const std::vector<Tensor>&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto arg = ctx->InputsBetween(range.first, range.second);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  // Handle args for optional std::vector<Tensor> input case
  template <typename... Tail>
  struct ComputeCallHelper<const paddle::optional<std::vector<Tensor>>&,
                           Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto arg = ctx->InputsBetween(range.first, range.second);
      if (arg.empty() || !arg[0].is_initialized()) {
        ComputeCallHelper<Tail...>::
            template Compute<in_idx + 1, attr_idx, out_idx>(
                ctx, pargs..., paddle::none);
      } else {
        ComputeCallHelper<
            Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
            ctx, pargs..., arg);
      }
    }
  };

  // Handle args for inplace std::vector<Tensor> case
  template <typename... Tail>
  struct ComputeCallHelper<std::vector<Tensor>&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto arg = ctx->InputsBetween(range.first, range.second);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  // Handle args for optional inplace std::vector<Tensor> case
  template <typename... Tail>
  struct ComputeCallHelper<paddle::optional<std::vector<Tensor>>&, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->InputRangeAt(in_idx);
      auto arg = ctx->OptionalInputsBetween(range.first, range.second);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(
          ctx, pargs..., arg);
    }
  };

  PD_SPECIALIZE_ComputeCallHelper(bool);
  PD_SPECIALIZE_ComputeCallHelper(int);
  PD_SPECIALIZE_ComputeCallHelper(float);
  PD_SPECIALIZE_ComputeCallHelper(int64_t);
  PD_SPECIALIZE_ComputeCallHelper(const std::string&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<int>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<float>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<int64_t>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<std::string>&);
  // TODO(chenweihang): support other attribute types if needed.
  // Why not support other attribute types here?
  // - paddle::blank, std::vector<bool> and std::vector<double>
  //   are not used in op
  // - BlockDesc* and std::vector<BlockDesc*> are used in framework

  // NOTE(chenweihang): Used to be compatible with the 2.0.1 released
  // interface, and will be deprecated in the future
  PD_SPECIALIZE_ComputeCallHelper(const bool&);
  PD_SPECIALIZE_ComputeCallHelper(const int&);
  PD_SPECIALIZE_ComputeCallHelper(const float&);
  PD_SPECIALIZE_ComputeCallHelper(const int64_t&);

  // NOTE(chenweihang): Used to be compatible with the 2.1 released
  // interface, but not recommended
  PD_SPECIALIZE_ComputeCallHelper(std::string);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<int>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<float>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<int64_t>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<std::string>);

  // Used to be compatible with 2.3 released internal inplace interface, not
  // recommended
  // Handle args for compatible inplace case
  template <typename... Tail>
  struct ComputeCallHelper<Tensor*, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->OutputRangeAt(out_idx);
      auto* arg = ctx->MutableOutputAt(range.first);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx, attr_idx, out_idx + 1>(
          ctx, pargs..., arg);
    }
  };

  // Used to be compatible with 2.3 released internal inplace interface, not
  // recommended
  // TODO(chenweihang): What is the appropriate output form?
  // std::vector<Tensor>*? or std::vector<Tensor*>? or std::vector<Tensor*>*
  // Handle args for compatible inplace case
  template <typename... Tail>
  struct ComputeCallHelper<std::vector<Tensor*>, Tail...> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      auto& range = ctx->OutputRangeAt(out_idx);
      auto arg = ctx->MutableOutputBetweeen(range.first, range.second);
      ComputeCallHelper<
          Tail...>::template Compute<in_idx, attr_idx, out_idx + 1>(
          ctx, pargs..., arg);
    }
  };

  template <int out_idx, typename T>
  struct ComputeReturnHelper;

  // For compatibility with the original custom op form
  template <int out_idx>
  struct ComputeReturnHelper<out_idx, std::vector<Tensor>> {
    static void Compute(CustomOpKernelContext* ctx, Args&... args) {
      static_assert(out_idx == 0,
                    "If you return std::vector<Tensor> from a custom "
                    "OpKernel, you cannot also pass outputs as kernel "
                    "function arguments.");
      auto outs = impl_fn(args...);
      auto* orig_outs = ctx->AllMutablePlainOutput();
      PD_CHECK(orig_outs->size() == outs.size(),
               "The number of elements in the custom operator's outputs is "
               "wrong: expected ",
               orig_outs->size(),
               " Tensors, but actually got ",
               outs.size(),
               " Tensors.");
      for (size_t i = 0; i < outs.size(); ++i) {
        AssignTensorImpl(outs.at(i), orig_outs->at(i));
      }
    }
  };

  template <int out_idx>
  struct ComputeReturnHelper<out_idx, void> {
    static void Compute(CustomOpKernelContext* ctx, Args&... args) {
      impl_fn(args...);
    }
  };

  // end: base template
  template <typename T>
  struct ComputeCallHelper<TypeTag<T>> {
    template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
    static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
      ComputeReturnHelper<out_idx, Return>::Compute(ctx, pargs...);
    }
  };
};

#define PD_KERNEL(...) \
  ::paddle::KernelFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Compute
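
// A minimal usage sketch. The kernel name and body are hypothetical; only
// the PD_KERNEL plumbing is defined by this header. Inputs, attributes, and
// outputs are unpacked from the context in the order they appear in the
// kernel's parameter list:
//
//   std::vector<paddle::Tensor> MyReluKernel(const paddle::Tensor& x,
//                                            float threshold) {
//     /* ... compute and return the output tensors ... */
//   }
//
//   paddle::KernelFunc f = PD_KERNEL(MyReluKernel);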

/////////////// InferShape Function (PD_INFER_SHAPE) ///////////////

// Record Op infershape core function
using InferShapeFunc = std::vector<std::vector<int64_t>> (*)(
    const std::vector<std::vector<int64_t>>& input_shapes,
    const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes,
    const std::vector<paddle::any>& attrs);
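
// Calling convention, illustrated with hypothetical values: for an op with
// inputs {"X", paddle::Vec("Y")} and attrs {"scale: float"}, the framework
// would pass roughly
//
//   input_shapes     = { {2, 3} };               // shape of "X"
//   vec_input_shapes = { { {2, 3}, {4, 5} } };   // shapes of the "Y" vector
//   attrs            = { paddle::any(1.0f) };    // "scale"
//
// Only the argument layout is defined by the typedef above; the names and
// numbers are examples.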

#define PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(input_type)        \
  template <typename... Tail>                                           \
  struct InferShapeCallHelper<input_type, Tail...> {                    \
    template <int in_idx,                                               \
              int vec_in_idx,                                           \
              int attr_idx,                                             \
              typename... PreviousArgs>                                 \
    static Return InferShape(                                           \
        const std::vector<std::vector<int64_t>>& input_shapes,          \
        const std::vector<std::vector<std::vector<int64_t>>>&           \
            vec_input_shapes,                                           \
        const std::vector<paddle::any>& attrs,                          \
        const PreviousArgs&... pargs) {                                 \
      input_type arg = input_shapes[in_idx];                            \
      return InferShapeCallHelper<Tail...>::                            \
          template InferShape<in_idx + 1, vec_in_idx, attr_idx>(        \
              input_shapes, vec_input_shapes, attrs, pargs..., arg);    \
    }                                                                   \
  }

#define PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(input_type)       \
  template <typename... Tail>                                           \
  struct InferShapeCallHelper<input_type, Tail...> {                    \
    template <int in_idx,                                               \
              int vec_in_idx,                                           \
              int attr_idx,                                             \
              typename... PreviousArgs>                                 \
    static Return InferShape(                                           \
        const std::vector<std::vector<int64_t>>& input_shapes,          \
        const std::vector<std::vector<std::vector<int64_t>>>&           \
            vec_input_shapes,                                           \
        const std::vector<paddle::any>& attrs,                          \
        const PreviousArgs&... pargs) {                                 \
      input_type arg = vec_input_shapes[vec_in_idx];                    \
      return InferShapeCallHelper<Tail...>::                            \
          template InferShape<in_idx, vec_in_idx + 1, attr_idx>(        \
              input_shapes, vec_input_shapes, attrs, pargs..., arg);    \
    }                                                                   \
  }

#define PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(attr_type)             \
  template <typename... Tail>                                              \
  struct InferShapeCallHelper<attr_type, Tail...> {                        \
    template <int in_idx,                                                  \
              int vec_in_idx,                                              \
              int attr_idx,                                                \
              typename... PreviousArgs>                                    \
    static Return InferShape(                                              \
        const std::vector<std::vector<int64_t>>& input_shapes,             \
        const std::vector<std::vector<std::vector<int64_t>>>&              \
            vec_input_shapes,                                              \
        const std::vector<paddle::any>& attrs,                             \
        const PreviousArgs&... pargs) {                                    \
      try {                                                                \
        attr_type arg = paddle::any_cast<attr_type>(attrs[attr_idx]);      \
        return InferShapeCallHelper<Tail...>::                             \
            template InferShape<in_idx, vec_in_idx, attr_idx + 1>(         \
                input_shapes, vec_input_shapes, attrs, pargs..., arg);     \
      } catch (paddle::bad_any_cast&) {                                    \
        PD_THROW(                                                          \
            "Attribute cast error in custom operator InferShapeFn. "       \
            "Expected " #attr_type                                         \
            " value. InferShapeFn's attribute list must be exactly the "   \
            "same as the forward KernelFn's attribute list, except for "   \
            "std::vector<int64_t> attributes.");                           \
      }                                                                    \
    }                                                                      \
  }

template <typename F, F f>
struct InferShapeFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferShape(
      const std::vector<std::vector<int64_t>>& input_shapes,
      const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes,
      const std::vector<paddle::any>& attrs) {
    return InferShapeCallHelper<Args..., TypeTag<int>>::
        template InferShape<0, 0, 0>(input_shapes, vec_input_shapes, attrs);
  }

 private:
  template <typename... RemainingArgs>
  struct InferShapeCallHelper;

  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(const std::vector<int64_t>&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(
      const std::vector<std::vector<int64_t>>&);

  // An empty shape list marks an optional input that was not provided
  template <typename... Tail>
  struct InferShapeCallHelper<const paddle::optional<std::vector<int64_t>>&,
                              Tail...> {
    template <int in_idx,
              int vec_in_idx,
              int attr_idx,
              typename... PreviousArgs>
    static Return InferShape(
        const std::vector<std::vector<int64_t>>& input_shapes,
        const std::vector<std::vector<std::vector<int64_t>>>&
            vec_input_shapes,
        const std::vector<paddle::any>& attrs,
        const PreviousArgs&... pargs) {
      const std::vector<int64_t>& arg = input_shapes[in_idx];
      if (arg.empty()) {
        return InferShapeCallHelper<Tail...>::
            template InferShape<in_idx + 1, vec_in_idx, attr_idx>(
                input_shapes, vec_input_shapes, attrs, pargs..., paddle::none);
      } else {
        return InferShapeCallHelper<Tail...>::
            template InferShape<in_idx + 1, vec_in_idx, attr_idx>(
                input_shapes, vec_input_shapes, attrs, pargs..., arg);
      }
    }
  };

  template <typename... Tail>
  struct InferShapeCallHelper<
      const paddle::optional<std::vector<std::vector<int64_t>>>&, Tail...> {
    template <int in_idx,
              int vec_in_idx,
              int attr_idx,
              typename... PreviousArgs>
    static Return InferShape(
        const std::vector<std::vector<int64_t>>& input_shapes,
        const std::vector<std::vector<std::vector<int64_t>>>&
            vec_input_shapes,
        const std::vector<paddle::any>& attrs,
        const PreviousArgs&... pargs) {
      const std::vector<std::vector<int64_t>>& arg =
          vec_input_shapes[vec_in_idx];
      if (arg.empty()) {
        return InferShapeCallHelper<Tail...>::
            template InferShape<in_idx, vec_in_idx + 1, attr_idx>(
                input_shapes, vec_input_shapes, attrs, pargs..., paddle::none);
      } else {
        return InferShapeCallHelper<Tail...>::
            template InferShape<in_idx, vec_in_idx + 1, attr_idx>(
                input_shapes, vec_input_shapes, attrs, pargs..., arg);
      }
    }
  };

  // NOTE(chenweihang): Used to be compatible with the 2.0.1 released
  // interface, and will be deprecated in the future
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(std::vector<int64_t>);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(
      std::vector<std::vector<int64_t>>);

  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(bool);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(int);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(float);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(int64_t);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const std::string&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const std::vector<int>&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const std::vector<float>&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const std::vector<std::string>&);
  // NOTE(chenweihang): InferShape can't support a std::vector<int64_t> attr
  // type: the input type is std::vector<std::vector<int64_t>>, and only one
  // rule can be used to parse a std::vector<int64_t> parameter

  // NOTE(chenweihang): Used to be compatible with the 2.0.1 released
  // interface, and will be deprecated in the future
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const bool&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const int&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const float&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(const int64_t&);

  // NOTE(chenweihang): Used to be compatible with the 2.1 released
  // interface, but not recommended
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(std::string);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(std::vector<int>);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(std::vector<float>);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_ATTR(std::vector<std::string>);

  // end: base template
  template <typename T>
  struct InferShapeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx, int attr_idx>
    static Return InferShape(
        const std::vector<std::vector<int64_t>>& input_shapes,
        const std::vector<std::vector<std::vector<int64_t>>>&
            vec_input_shapes,
        const std::vector<paddle::any>& attrs,
        const Args&... args) {
      return impl_fn(args...);
    }
  };
};

#define PD_INFER_SHAPE(...)                                 \
  ::paddle::InferShapeFuncImpl<decltype(&__VA_ARGS__),      \
                               &__VA_ARGS__>::InferShape
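
// A minimal usage sketch. The function name and shape logic are
// hypothetical; PD_INFER_SHAPE and the parameter convention come from this
// header:
//
//   std::vector<std::vector<int64_t>> MyReluInferShape(
//       const std::vector<int64_t>& x_shape) {
//     return {x_shape};  // elementwise op: output shape equals input shape
//   }
//
//   paddle::InferShapeFunc f = PD_INFER_SHAPE(MyReluInferShape);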

/////////////// InferDataType Function (PD_INFER_DTYPE) ///////////////

// Record Op Infer dtype core function
using InferDtypeFunc = std::vector<DataType> (*)(
    const std::vector<DataType>& input_dtypes,
    const std::vector<std::vector<DataType>>& vec_input_dtypes);

#define PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(input_type)         \
  template <typename... Tail>                                           \
  struct InferDtypeCallHelper<input_type, Tail...> {                    \
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>     \
    static Return InferDtype(                                           \
        const std::vector<DataType>& input_dtypes,                      \
        const std::vector<std::vector<DataType>>& vec_input_dtypes,     \
        const PreviousArgs&... pargs) {                                 \
      input_type arg = input_dtypes[in_idx];                            \
      return InferDtypeCallHelper<Tail...>::                            \
          template InferDtype<in_idx + 1, vec_in_idx>(                  \
              input_dtypes, vec_input_dtypes, pargs..., arg);           \
    }                                                                   \
  }

#define PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(input_type)       \
  template <typename... Tail>                                           \
  struct InferDtypeCallHelper<input_type, Tail...> {                    \
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>     \
    static Return InferDtype(                                           \
        const std::vector<DataType>& input_dtypes,                      \
        const std::vector<std::vector<DataType>>& vec_input_dtypes,     \
        const PreviousArgs&... pargs) {                                 \
      input_type arg = vec_input_dtypes[vec_in_idx];                    \
      return InferDtypeCallHelper<Tail...>::                            \
          template InferDtype<in_idx, vec_in_idx + 1>(                  \
              input_dtypes, vec_input_dtypes, pargs..., arg);           \
    }                                                                   \
  }

template <typename F, F f>
struct InferDtypeFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferDtype(
      const std::vector<DataType>& input_dtypes,
      const std::vector<std::vector<DataType>>& vec_input_dtypes) {
    return InferDtypeCallHelper<Args..., TypeTag<int>>::
        template InferDtype<0, 0>(input_dtypes, vec_input_dtypes);
  }

 private:
  template <typename... RemainingArgs>
  struct InferDtypeCallHelper;

  PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(const DataType&);
  PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(const std::vector<DataType>&);

  // DataType::UNDEFINED marks an optional input that was not provided
  template <typename... Tail>
  struct InferDtypeCallHelper<const paddle::optional<DataType>&, Tail...> {
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>
    static Return InferDtype(
        const std::vector<DataType>& input_dtypes,
        const std::vector<std::vector<DataType>>& vec_input_dtypes,
        const PreviousArgs&... pargs) {
      const DataType& arg = input_dtypes[in_idx];
      if (arg == DataType::UNDEFINED) {
        return InferDtypeCallHelper<Tail...>::
            template InferDtype<in_idx + 1, vec_in_idx>(
                input_dtypes, vec_input_dtypes, pargs..., paddle::none);
      } else {
        return InferDtypeCallHelper<Tail...>::
            template InferDtype<in_idx + 1, vec_in_idx>(
                input_dtypes, vec_input_dtypes, pargs..., arg);
      }
    }
  };

  template <typename... Tail>
  struct InferDtypeCallHelper<const paddle::optional<std::vector<DataType>>&,
                              Tail...> {
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>
    static Return InferDtype(
        const std::vector<DataType>& input_dtypes,
        const std::vector<std::vector<DataType>>& vec_input_dtypes,
        const PreviousArgs&... pargs) {
      const std::vector<DataType>& arg = vec_input_dtypes[vec_in_idx];
      if (arg.empty()) {
        return InferDtypeCallHelper<Tail...>::
            template InferDtype<in_idx, vec_in_idx + 1>(
                input_dtypes, vec_input_dtypes, pargs..., paddle::none);
      } else {
        return InferDtypeCallHelper<Tail...>::
            template InferDtype<in_idx, vec_in_idx + 1>(
                input_dtypes, vec_input_dtypes, pargs..., arg);
      }
    }
  };

  // NOTE(chenweihang): Used to be compatible with the 2.0.1 released
  // interface, and will be deprecated in the future
  PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(DataType);
  PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(std::vector<DataType>);

  // end: base template
  template <typename T>
  struct InferDtypeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx>
    static Return InferDtype(
        const std::vector<DataType>& input_dtypes,
        const std::vector<std::vector<DataType>>& vec_input_dtypes,
        const Args&... args) {
      return impl_fn(args...);
    }
  };
};

#define PD_INFER_DTYPE(...)                                 \
  ::paddle::InferDtypeFuncImpl<decltype(&__VA_ARGS__),      \
                               &__VA_ARGS__>::InferDtype
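
// A minimal usage sketch (hypothetical function name and logic):
//
//   std::vector<paddle::DataType> MyReluInferDtype(
//       const paddle::DataType& x_dtype) {
//     return {x_dtype};  // output dtype follows the input dtype
//   }
//
//   paddle::InferDtypeFunc f = PD_INFER_DTYPE(MyReluInferDtype);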

////////////////////// Op Meta Info //////////////////////

class PADDLE_API OpMetaInfo {
 public:
  explicit OpMetaInfo(const std::string& op_name) : name_(op_name) {}

  // format: {"<name1>", "<name2>", ...}
  OpMetaInfo& Inputs(std::vector<std::string>&& inputs);

  // format: {"<name1>", "<name2>", ...}
  OpMetaInfo& Outputs(std::vector<std::string>&& outputs);

  // format: {"<name1>:<attr1_type>", "<name2>:<attr2_type>", ...}
  OpMetaInfo& Attrs(std::vector<std::string>&& attrs);

  // format: {"<input_name1>:<output_name1>",
  //          "<input_name2>:<output_name2>", ...}
  OpMetaInfo& SetInplaceMap(
      std::unordered_map<std::string, std::string>&& inplace_map);

  // format: PD_KERNEL(...)
  OpMetaInfo& SetKernelFn(KernelFunc&& func);

  // format: PD_INFER_SHAPE(...)
  OpMetaInfo& SetInferShapeFn(InferShapeFunc&& func);

  // format: PD_INFER_DTYPE(...)
  OpMetaInfo& SetInferDtypeFn(InferDtypeFunc&& func);

 private:
  friend class OpMetaInfoHelper;

  // 1. desc info
  std::string name_;
  std::vector<std::string> inputs_;
  std::vector<std::string> outputs_;
  std::vector<std::string> attrs_;
  std::unordered_map<std::string, std::string> inplace_map_;
  std::unordered_map<std::string, std::string> inplace_reverse_map_;
  // 2. func info
  KernelFunc kernel_fn_{nullptr};
  InferShapeFunc infer_shape_fn_{nullptr};
  InferDtypeFunc infer_dtype_fn_{nullptr};
};

//////////////// Op Meta Info Helper /////////////////

class OpMetaInfoHelper {
 public:
  static const std::string& GetOpName(const paddle::OpMetaInfo& info) {
    return info.name_;
  }
  static const std::vector<std::string>& GetInputs(
      const paddle::OpMetaInfo& info) {
    return info.inputs_;
  }
  static const std::vector<std::string>& GetOutputs(
      const paddle::OpMetaInfo& info) {
    return info.outputs_;
  }
  static const std::vector<std::string>& GetAttrs(
      const paddle::OpMetaInfo& info) {
    return info.attrs_;
  }
  static const std::unordered_map<std::string, std::string>& GetInplaceMap(
      const paddle::OpMetaInfo& info) {
    return info.inplace_map_;
  }
  static const std::unordered_map<std::string, std::string>&
  GetInplaceReverseMap(const paddle::OpMetaInfo& info) {
    return info.inplace_reverse_map_;
  }
  static const KernelFunc& GetKernelFn(const paddle::OpMetaInfo& info) {
    return info.kernel_fn_;
  }
  static const InferShapeFunc& GetInferShapeFn(
      const paddle::OpMetaInfo& info) {
    return info.infer_shape_fn_;
  }
  static const InferDtypeFunc& GetInferDtypeFn(
      const paddle::OpMetaInfo& info) {
    return info.infer_dtype_fn_;
  }
};

//////////////// Op Meta Info Map /////////////////

class PADDLE_API OpMetaInfoMap {
 public:
  // This function's implementation must stay in the header file; if it is
  // moved to a .cc file, meta info can not be added into the map.
  static OpMetaInfoMap& Instance() {
    static OpMetaInfoMap g_custom_op_meta_info_map;
    return g_custom_op_meta_info_map;
  }

  std::vector<OpMetaInfo>& operator[](const std::string& name);

  const std::unordered_map<std::string, std::vector<OpMetaInfo>>& GetMap()
      const;

 private:
  OpMetaInfoMap() = default;
  std::unordered_map<std::string, std::vector<OpMetaInfo>> map_;

  PD_DISABLE_COPY_AND_ASSIGN(OpMetaInfoMap);
};

//////////////// Op Meta Info Builder /////////////////

class PADDLE_API OpMetaInfoBuilder {
 public:
  explicit OpMetaInfoBuilder(std::string&& name, size_t index);
  OpMetaInfoBuilder& Inputs(std::vector<std::string>&& inputs);
  OpMetaInfoBuilder& Outputs(std::vector<std::string>&& outputs);
  OpMetaInfoBuilder& Attrs(std::vector<std::string>&& attrs);
  OpMetaInfoBuilder& SetInplaceMap(
      std::unordered_map<std::string, std::string>&& inplace_map);
  OpMetaInfoBuilder& SetKernelFn(KernelFunc func);
  OpMetaInfoBuilder& SetInferShapeFn(InferShapeFunc func);
  OpMetaInfoBuilder& SetInferDtypeFn(InferDtypeFunc func);

 private:
  // Forward Op name
  std::string name_;
  // pointer to the currently registered info
  OpMetaInfo* info_ptr_;
  // The current op meta info index in vector
  // - 0: op, 1: grad_op, 2: grad_grad_op
  size_t index_;
};

/////////////////////// Op register Macro /////////////////////////

#define PD_BUILD_OP(op_name)                                                   \
  STATIC_ASSERT_GLOBAL_NAMESPACE(                                              \
      __reg_op__##op_name, "PD_BUILD_OP must be called in global namespace."); \
  static ::paddle::OpMetaInfoBuilder __op_meta_info_##op_name##__ =            \
      ::paddle::OpMetaInfoBuilder(#op_name, 0)

#define PD_BUILD_GRAD_OP(op_name)                                        \
  STATIC_ASSERT_GLOBAL_NAMESPACE(                                        \
      __reg_grad_op__##op_name,                                          \
      "PD_BUILD_GRAD_OP must be called in global namespace.");           \
  static ::paddle::OpMetaInfoBuilder __grad_op_meta_info_##op_name##__ = \
      ::paddle::OpMetaInfoBuilder(#op_name, 1)

#define PD_BUILD_DOUBLE_GRAD_OP(op_name)                                      \
  STATIC_ASSERT_GLOBAL_NAMESPACE(                                             \
      __reg_grad_grad_op__##op_name,                                          \
      "PD_BUILD_DOUBLE_GRAD_OP must be called in global namespace.");         \
  static ::paddle::OpMetaInfoBuilder __grad_grad_op_meta_info_##op_name##__ = \
      ::paddle::OpMetaInfoBuilder(#op_name, 2)
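
// A minimal end-to-end registration sketch. The op name, tensor names, and
// the referenced functions are hypothetical (see the PD_KERNEL /
// PD_INFER_SHAPE / PD_INFER_DTYPE sketches above); the builder chain itself
// is the interface defined by this header:
//
//   PD_BUILD_OP(my_relu)
//       .Inputs({"X"})
//       .Outputs({"Out"})
//       .Attrs({"threshold: float"})
//       .SetKernelFn(PD_KERNEL(MyReluKernel))
//       .SetInferShapeFn(PD_INFER_SHAPE(MyReluInferShape))
//       .SetInferDtypeFn(PD_INFER_DTYPE(MyReluInferDtype));
//
//   PD_BUILD_GRAD_OP(my_relu)
//       .Inputs({"X", "Out", paddle::Grad("Out")})
//       .Outputs({paddle::Grad("X")})
//       .SetKernelFn(PD_KERNEL(MyReluGradKernel));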
"PD_BUILD_DOUBLE_GRAD_OP must be called in global namespace."); \ static ::paddle::OpMetaInfoBuilder __grad_grad_op_meta_info_##op_name##__ = \ ::paddle::OpMetaInfoBuilder(#op_name, 2) } // namespace paddle ///////////////////// C API /////////////////// #ifdef __cplusplus extern "C" { #endif #if defined(_WIN32) // C-API to get global OpMetaInfoMap. __declspec(dllexport) inline paddle::OpMetaInfoMap& PD_GetOpMetaInfoMap() { return paddle::OpMetaInfoMap::Instance(); } #endif // _WIN32 #ifdef __cplusplus } #endif