/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

#include <boost/any.hpp>

#include "paddle/fluid/extension/include/dll_decl.h"
#include "paddle/fluid/extension/include/tensor.h"

/**
 * Op Meta Info Related Define.
 *
 * Used to maintain operator core information.
 *
 */

namespace paddle {
namespace framework {
class PD_DLL_DECL OpMetaInfoHelper;
}  // namespace framework

using Tensor = paddle::Tensor;

#define PD_DISABLE_COPY_AND_ASSIGN(classname)      \
 private:                                          \
  classname(const classname&) = delete;            \
  classname(classname&&) = delete;                 \
  classname& operator=(const classname&) = delete; \
  classname& operator=(classname&&) = delete

#if defined _WIN32
#define HANDLE_THE_ERROR try {
#define END_HANDLE_THE_ERROR            \
  }                                     \
  catch (const std::exception& e) {     \
    std::cerr << e.what() << std::endl; \
    throw e;                            \
  }
#else
#define HANDLE_THE_ERROR
#define END_HANDLE_THE_ERROR
#endif

#define PD_THROW(err_msg)              \
  do {                                 \
    HANDLE_THE_ERROR                   \
    throw std::runtime_error(err_msg); \
    END_HANDLE_THE_ERROR               \
  } while (0)

///////////////// Util Define and Function ////////////////

inline std::string Grad(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + 5U);
  result += var_name;
  result += "@GRAD";
  return result;
}

////////////////////// Kernel Function (PD_KERNEL) ////////////////////////

// Record Op kernel core function
using KernelFunc = std::vector<Tensor> (*)(std::vector<Tensor> inputs,
                                           std::vector<boost::any> attrs);

template <typename T>
struct TypeTag {};

template <typename F, F f>
struct KernelFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  static Return Compute(std::vector<Tensor> inputs,
                        std::vector<boost::any> attrs) {
    return ComputeCallHelper<Args..., TypeTag<int>>::template Compute<0, 0>(
        inputs, attrs);
  }

 private:
  template <typename... RemainingArgs>
  struct ComputeCallHelper;

  // for Tensor input
  template <typename... Tail>
  struct ComputeCallHelper<const Tensor&, Tail...> {
    template <int in_idx, int attr_idx, typename... PreviousArgs>
    static Return Compute(std::vector<Tensor> inputs,
                          std::vector<boost::any> attrs,
                          const PreviousArgs&... pargs) {
      static_assert(attr_idx == 0,
                    "Input tensor should appear before attributes.");
      const Tensor& arg = inputs[in_idx];
      return ComputeCallHelper<Tail...>::template Compute<in_idx + 1,
                                                          attr_idx>(
          inputs, attrs, pargs..., arg);
    }
  };

  // TODO(chenweihang): add support for attribute input
  // int attribute input (not used now)
  template <typename... Tail>
  struct ComputeCallHelper<int, Tail...> {
    template <int in_idx, int attr_idx, typename... PreviousArgs>
    static Return Compute(std::vector<Tensor> inputs,
                          std::vector<boost::any> attrs,
                          const PreviousArgs&... pargs) {
      try {
        int arg = boost::any_cast<int>(attrs[attr_idx]);
        return ComputeCallHelper<Tail...>::template Compute<in_idx,
                                                            attr_idx + 1>(
            inputs, attrs, pargs..., arg);
      } catch (boost::bad_any_cast&) {
        PD_THROW(
            "Attribute cast error in custom operator. Expected int value.");
      }
    }
  };

  // end: base template
  template <typename T>
  struct ComputeCallHelper<TypeTag<T>> {
    template <int in_idx, int attr_idx>
    static Return Compute(std::vector<Tensor> inputs,
                          std::vector<boost::any> attrs,
                          const Args&... args) {
      return impl_fn(args...);
    }
  };
};

#define PD_KERNEL(...) \
  ::paddle::KernelFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Compute
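
// A minimal usage sketch (kept as a comment; the function name ReluCpuForward
// is hypothetical): PD_KERNEL adapts a user-written kernel that takes
// `const Tensor&` inputs and returns `std::vector<Tensor>` into the uniform
// KernelFunc signature defined above.
//
//   std::vector<Tensor> ReluCpuForward(const Tensor& x);
//
//   // The adapted static function matches KernelFunc's signature, so it
//   // decays to a KernelFunc pointer:
//   KernelFunc fn = PD_KERNEL(ReluCpuForward);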

/////////////// InferShape Function (PD_INFER_SHAPE) ///////////////

// Record Op infershape core function
using InferShapeFunc = std::vector<std::vector<int64_t>> (*)(
    std::vector<std::vector<int64_t>> input_shapes);

template <typename F, F f>
struct InferShapeFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferShape(std::vector<std::vector<int64_t>> input_shapes) {
    return InferShapeCallHelper<Args..., TypeTag<int>>::template InferShape<0>(
        input_shapes);
  }

 private:
  template <typename... RemainingArgs>
  struct InferShapeCallHelper;

  // only one type input: std::vector<int64_t>
  template <typename... Tail>
  struct InferShapeCallHelper<std::vector<int64_t>, Tail...> {
    template <int in_idx, typename... PreviousArgs>
    static Return InferShape(std::vector<std::vector<int64_t>> input_shapes,
                             const PreviousArgs&... pargs) {
      std::vector<int64_t> arg = input_shapes[in_idx];
      return InferShapeCallHelper<Tail...>::template InferShape<in_idx + 1>(
          input_shapes, pargs..., arg);
    }
  };

  // end: base template
  template <typename T>
  struct InferShapeCallHelper<TypeTag<T>> {
    template <int in_idx>
    static Return InferShape(std::vector<std::vector<int64_t>> input_shapes,
                             const Args&... args) {
      return impl_fn(args...);
    }
  };
};

#define PD_INFER_SHAPE(...) \
  ::paddle::InferShapeFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::InferShape

/////////////// InferDataType Function (PD_INFER_DTYPE) ///////////////

// Record Op Infer dtype core function
using InferDtypeFunc =
    std::vector<DataType> (*)(std::vector<DataType> input_dtypes);

template <typename F, F f>
struct InferDtypeFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferDtype(std::vector<DataType> input_dtypes) {
    return InferDtypeCallHelper<Args..., TypeTag<int>>::template InferDtype<0>(
        input_dtypes);
  }

 private:
  template <typename... RemainingArgs>
  struct InferDtypeCallHelper;

  // Only one type input now: DataType
  template <typename... Tail>
  struct InferDtypeCallHelper<DataType, Tail...> {
    template <int in_idx, typename... PreviousArgs>
    static Return InferDtype(std::vector<DataType> input_dtypes,
                             const PreviousArgs&... pargs) {
      DataType arg = input_dtypes[in_idx];
      return InferDtypeCallHelper<Tail...>::template InferDtype<in_idx + 1>(
          input_dtypes, pargs..., arg);
    }
  };

  // end: base template
  template <typename T>
  struct InferDtypeCallHelper<TypeTag<T>> {
    template <int in_idx>
    static Return InferDtype(std::vector<DataType> input_dtypes,
                             const Args&... args) {
      return impl_fn(args...);
    }
  };
};

#define PD_INFER_DTYPE(...) \
  ::paddle::InferDtypeFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::InferDtype
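
// A minimal usage sketch (hypothetical function names): an infershape
// function receives one std::vector<int64_t> per input and an inferdtype
// function receives one DataType per input; PD_INFER_SHAPE and
// PD_INFER_DTYPE adapt them into InferShapeFunc and InferDtypeFunc.
//
//   std::vector<std::vector<int64_t>> ReluInferShape(
//       std::vector<int64_t> x_shape) {
//     return {x_shape};
//   }
//
//   std::vector<DataType> ReluInferDtype(DataType x_dtype) {
//     return {x_dtype};
//   }
//
//   InferShapeFunc shape_fn = PD_INFER_SHAPE(ReluInferShape);
//   InferDtypeFunc dtype_fn = PD_INFER_DTYPE(ReluInferDtype);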

////////////////////// Op Meta Info //////////////////////

class PD_DLL_DECL OpMetaInfo {
 public:
  explicit OpMetaInfo(const std::string& op_name) : name_(op_name) {}
  OpMetaInfo& Inputs(std::vector<std::string>&& inputs);
  OpMetaInfo& Outputs(std::vector<std::string>&& outputs);
  OpMetaInfo& SetKernelFn(KernelFunc&& func);
  OpMetaInfo& SetInferShapeFn(InferShapeFunc&& func);
  OpMetaInfo& SetInferDtypeFn(InferDtypeFunc&& func);

 private:
  friend class framework::OpMetaInfoHelper;

  // 1. desc info
  std::string name_;
  std::vector<std::string> inputs_;
  std::vector<std::string> outputs_;
  std::vector<std::string> attrs_;

  // 2. func info
  KernelFunc kernel_fn_;
  InferShapeFunc infer_shape_fn_;
  InferDtypeFunc infer_dtype_fn_;
};

//////////////// Op Meta Info Map /////////////////

class PD_DLL_DECL OpMetaInfoMap {
 public:
  // This function's implementation should stay in the header file:
  // if it is moved to a .cc file, meta info can not be added into the map.
  static OpMetaInfoMap& Instance() {
    static OpMetaInfoMap g_custom_op_meta_info_map;
    return g_custom_op_meta_info_map;
  }

  std::vector<OpMetaInfo>& operator[](const std::string& name);

  const std::unordered_map<std::string, std::vector<OpMetaInfo>>& GetMap()
      const;

 private:
  OpMetaInfoMap() = default;
  std::unordered_map<std::string, std::vector<OpMetaInfo>> map_;

  PD_DISABLE_COPY_AND_ASSIGN(OpMetaInfoMap);
};

//////////////// Op Meta Info Builder /////////////////

class PD_DLL_DECL OpMetaInfoBuilder {
 public:
  explicit OpMetaInfoBuilder(std::string&& name);
  OpMetaInfoBuilder& Inputs(std::vector<std::string>&& inputs);
  OpMetaInfoBuilder& Outputs(std::vector<std::string>&& outputs);
  OpMetaInfoBuilder& SetKernelFn(KernelFunc func);
  OpMetaInfoBuilder& SetInferShapeFn(InferShapeFunc func);
  OpMetaInfoBuilder& SetInferDtypeFn(InferDtypeFunc func);
  OpMetaInfoBuilder& SetBackwardOp(const std::string& bwd_op_name);

 private:
  // Forward Op name
  std::string name_;
  // Point to the currently constructed op meta info
  OpMetaInfo* info_ptr_;
};

/////////////////////// Op register API /////////////////////////

// For inference: compile directly with framework
// Call after PD_BUILD_OP(...)
void RegisterAllCustomOperator();

// Use this API to load a compiled custom operator's dynamic library and
// register the custom operators in it
void LoadCustomOperatorLib(const std::string& dso_name);

/////////////////////// Op register Macro /////////////////////////

#define PD_BUILD_OP_WITH_COUNTER(op_name, counter)                  \
  static ::paddle::OpMetaInfoBuilder __op_meta_info_##counter##__ = \
      ::paddle::OpMetaInfoBuilder(op_name)

#define PD_BUILD_OP_INNER(op_name, counter) \
  PD_BUILD_OP_WITH_COUNTER(op_name, counter)

#define PD_BUILD_OP(op_name) PD_BUILD_OP_INNER(op_name, __COUNTER__)

}  // namespace paddle

///////////////////// C API ///////////////////

#ifdef __cplusplus
extern "C" {
#endif

#if defined(_WIN32)
// C-API to get global OpMetaInfoMap.
__declspec(dllexport) inline paddle::OpMetaInfoMap& PD_GetOpMetaInfoMap() {
  return paddle::OpMetaInfoMap::Instance();
}
#endif  // _WIN32

#ifdef __cplusplus
}
#endif
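
// A minimal registration sketch (op and function names are hypothetical):
// each PD_BUILD_OP statement defines a static OpMetaInfoBuilder that records
// the op's inputs, outputs and functions in the global OpMetaInfoMap, and
// SetBackwardOp re-points the builder at the backward op's meta info so the
// grad op can be described in the same chain.
//
//   PD_BUILD_OP("relu2")
//       .Inputs({"X"})
//       .Outputs({"Out"})
//       .SetKernelFn(PD_KERNEL(ReluCpuForward))
//       .SetInferShapeFn(PD_INFER_SHAPE(ReluInferShape))
//       .SetInferDtypeFn(PD_INFER_DTYPE(ReluInferDtype))
//       .SetBackwardOp("relu2_grad")
//       .Inputs({"X", "Out", paddle::Grad("Out")})
//       .Outputs({paddle::Grad("X")})
//       .SetKernelFn(PD_KERNEL(ReluCpuBackward));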