From 0508b94ba79e8b221b7b5c8bbfd48e7039acd9c8 Mon Sep 17 00:00:00 2001
From: JingZhuangzhuang <75348594+JZZ-NOTE@users.noreply.github.com>
Date: Thu, 20 Oct 2022 14:29:13 +0800
Subject: [PATCH] Add _get_phi_kernel_name interface (#47032)

* add _get_phi_kernel_name interface

* remove inference interface

* Revert "remove inference interface"

This reverts commit 784a8a6c51fa2dc49a01c8699525298ac21b178f.
---
 paddle/fluid/pybind/inference_api.cc      | 7 +++++++
 python/paddle/fluid/core.py               | 1 +
 python/paddle/fluid/inference/__init__.py | 2 +-
 python/paddle/inference/__init__.py       | 7 ++++---
 4 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc
index d63acc682ad..d067f8e47fc 100644
--- a/paddle/fluid/pybind/inference_api.cc
+++ b/paddle/fluid/pybind/inference_api.cc
@@ -35,6 +35,7 @@
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_pass_builder.h"
 #include "paddle/fluid/inference/utils/io_utils.h"
+#include "paddle/phi/core/compat/convert_utils.h"
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 #include "paddle/phi/core/cuda_stream.h"
@@ -401,6 +402,12 @@ void BindInferenceApi(py::module *m) {
         new paddle_infer::Predictor(config));
     return pred;
   });
+  m->def(
+      "_get_phi_kernel_name",
+      [](const std::string &fluid_op_name) {
+        return phi::TransToPhiKernelName(fluid_op_name);
+      },
+      py::return_value_policy::reference);
   m->def("copy_tensor", &CopyPaddleInferTensor);
   m->def("paddle_dtype_size", &paddle::PaddleDtypeSize);
   m->def("paddle_tensor_to_bytes", &SerializePDTensorToBytes);
diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py
index 1654fd827e5..18352757dd8 100644
--- a/python/paddle/fluid/core.py
+++ b/python/paddle/fluid/core.py
@@ -277,6 +277,7 @@ try:
         from .libpaddle import _get_current_stream
         from .libpaddle import _Profiler, _ProfilerResult, _RecordEvent
         from .libpaddle import _set_current_stream
+        from .libpaddle import _get_phi_kernel_name
         if sys.platform != 'win32':
             from .libpaddle import _set_process_pids
             from .libpaddle import _erase_process_pids
diff --git a/python/paddle/fluid/inference/__init__.py b/python/paddle/fluid/inference/__init__.py
index e5e39054e6f..d6b8b102487 100644
--- a/python/paddle/fluid/inference/__init__.py
+++ b/python/paddle/fluid/inference/__init__.py
@@ -15,4 +15,4 @@
 
 from .wrapper import Config, DataType, PlaceType, PrecisionType, Tensor, Predictor
 from .wrapper import convert_to_mixed_precision
-from ..core import create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool, get_trt_compile_version, get_trt_runtime_version
+from ..core import create_predictor, get_version, _get_phi_kernel_name, get_num_bytes_of_data_type, PredictorPool, get_trt_compile_version, get_trt_runtime_version
diff --git a/python/paddle/inference/__init__.py b/python/paddle/inference/__init__.py
index 4b066eb16a5..1b9a96f437b 100644
--- a/python/paddle/inference/__init__.py
+++ b/python/paddle/inference/__init__.py
@@ -20,6 +20,7 @@ from ..fluid.inference import Tensor  # noqa: F401
 from ..fluid.inference import Predictor  # noqa: F401
 from ..fluid.inference import create_predictor  # noqa: F401
 from ..fluid.inference import get_version  # noqa: F401
+from ..fluid.inference import _get_phi_kernel_name
 from ..fluid.inference import get_trt_compile_version  # noqa: F401
 from ..fluid.inference import get_trt_runtime_version  # noqa: F401
 from ..fluid.inference import convert_to_mixed_precision  # noqa: F401
@@ -28,7 +29,7 @@ from ..fluid.inference import PredictorPool  # noqa: F401
 
 __all__ = [  # noqa
     'Config', 'DataType', 'PlaceType', 'PrecisionType', 'Tensor', 'Predictor',
-    'create_predictor', 'get_version', 'get_trt_compile_version',
-    'convert_to_mixed_precision', 'get_trt_runtime_version',
-    'get_num_bytes_of_data_type', 'PredictorPool'
+    'create_predictor', 'get_version', '_get_phi_kernel_name',
+    'get_trt_compile_version', 'convert_to_mixed_precision',
+    'get_trt_runtime_version', 'get_num_bytes_of_data_type', 'PredictorPool'
 ]
-- 
GitLab