From 40e51b258c36d51d63a546dd02c9f13e40294abb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=9F=B3=E6=99=93=E4=BC=9F?= <39303645+Shixiaowei02@users.noreply.github.com> Date: Mon, 26 Apr 2021 11:11:41 +0800 Subject: [PATCH] python inference supports custom operators, test=develop (#32533) --- paddle/fluid/framework/custom_operator.h | 3 ++ paddle/fluid/inference/api/CMakeLists.txt | 4 +-- .../fluid/inference/api/analysis_predictor.cc | 2 +- paddle/fluid/inference/api/helper.cc | 18 ++++++++++++ paddle/fluid/inference/api/helper.h | 2 ++ .../custom_op/test_custom_relu_op_setup.py | 29 +++++++++++++++++++ 6 files changed, 55 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/framework/custom_operator.h b/paddle/fluid/framework/custom_operator.h index 117841f80cf..259901c09f3 100644 --- a/paddle/fluid/framework/custom_operator.h +++ b/paddle/fluid/framework/custom_operator.h @@ -28,5 +28,8 @@ void LoadOpMetaInfoAndRegisterOp(const std::string& dso_name); void RegisterOperatorWithMetaInfoMap( const paddle::OpMetaInfoMap& op_meta_info_map); +// Interface for selective register custom op. 
+void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index 03f86cc7ba6..82c95ba2c95 100755 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -32,10 +32,10 @@ cc_library(paddle_pass_builder SRCS paddle_pass_builder.cc) if(WITH_CRYPTO) cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor scope reset_tensor_array - analysis_config zero_copy_tensor trainer_desc_proto paddle_crypto) + analysis_config zero_copy_tensor trainer_desc_proto paddle_crypto custom_operator) else() cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor scope reset_tensor_array - analysis_config zero_copy_tensor trainer_desc_proto) + analysis_config zero_copy_tensor trainer_desc_proto custom_operator) endif() if(WIN32) diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 95b08318368..6a6be14fd59 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -628,7 +628,7 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor< // This function can only be executed once per process. static std::once_flag custom_operators_registered; std::call_once(custom_operators_registered, - []() { paddle::RegisterAllCustomOperator(); }); + []() { inference::RegisterAllCustomOperator(); }); if (config.use_gpu()) { static std::once_flag gflags_initialized; diff --git a/paddle/fluid/inference/api/helper.cc b/paddle/fluid/inference/api/helper.cc index 9cc491e10d6..d78560239de 100644 --- a/paddle/fluid/inference/api/helper.cc +++ b/paddle/fluid/inference/api/helper.cc @@ -13,6 +13,9 @@ // limitations under the License. 
#include "paddle/fluid/inference/api/helper.h" +#include "paddle/fluid/extension/include/ext_op_meta_info.h" +#include "paddle/fluid/framework/custom_operator.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace inference { @@ -40,5 +43,20 @@ std::string to_string<std::vector<std::vector<float>>>( return ss.str(); } +void RegisterAllCustomOperator() { + auto &op_meta_info_map = OpMetaInfoMap::Instance(); + const auto &meta_info_map = op_meta_info_map.GetMap(); + for (auto &pair : meta_info_map) { + const auto &all_op_kernels{framework::OperatorWithKernel::AllOpKernels()}; + if (all_op_kernels.find(pair.first) == all_op_kernels.end()) { + framework::RegisterOperatorWithMetaInfo(pair.second); + } else { + LOG(INFO) << "The operator `" << pair.first + << "` has been registered. " "Therefore, we will not repeat the registration here."; + } + } +} + } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index 14b968f5834..c6d25137594 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -398,5 +398,7 @@ static bool IsFileExists(const std::string &path) { return exists; } +void RegisterAllCustomOperator(); + } // namespace inference } // namespace paddle diff --git a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py index 7f5c76d0aee..642e93ebcb8 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_relu_op_setup.py @@ -255,6 +255,35 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase): format(predict, predict_infer)) paddle.disable_static() + def test_static_save_and_run_inference_predictor(self): + paddle.enable_static() + np_data = np.random.random((1, 1, 28, 28)).astype("float32") + np_label = np.random.random((1, 1)).astype("int64") + path_prefix = "custom_op_inference/custom_relu" + from paddle.inference import Config + from paddle.inference import create_predictor + for device in self.devices: + predict = custom_relu_static_inference( + self.custom_ops[0], device, np_data, np_label, path_prefix) + # load inference model + config = Config(path_prefix + ".pdmodel", + path_prefix + ".pdiparams") + predictor = create_predictor(config) + input_tensor = predictor.get_input_handle(predictor.get_input_names( + )[0]) + input_tensor.reshape(np_data.shape) + input_tensor.copy_from_cpu(np_data.copy()) + predictor.run() + output_tensor = predictor.get_output_handle( + predictor.get_output_names()[0]) + predict_infer = output_tensor.copy_to_cpu() + self.assertTrue( + np.isclose( + predict, predict_infer, rtol=5e-5).any(), + "custom op predict: {},\n custom op infer predict: {}".format( + predict, predict_infer)) + paddle.disable_static() + if __name__ == '__main__': unittest.main() -- GitLab