diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index d63acc682adfa5ab440b29044497cd6018d4219b..d067f8e47fc48ee0c231d3b8617b6593ec1eab1d 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -35,6 +35,7 @@ #include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/inference/api/paddle_pass_builder.h" #include "paddle/fluid/inference/utils/io_utils.h" +#include "paddle/phi/core/compat/convert_utils.h" #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) #include "paddle/phi/core/cuda_stream.h" @@ -401,6 +402,12 @@ void BindInferenceApi(py::module *m) { new paddle_infer::Predictor(config)); return pred; }); + m->def( + "_get_phi_kernel_name", + [](const std::string &fluid_op_name) { + return phi::TransToPhiKernelName(fluid_op_name); + }, + py::return_value_policy::automatic); m->def("copy_tensor", &CopyPaddleInferTensor); m->def("paddle_dtype_size", &paddle::PaddleDtypeSize); m->def("paddle_tensor_to_bytes", &SerializePDTensorToBytes); diff --git a/python/paddle/fluid/core.py b/python/paddle/fluid/core.py index 1654fd827e5ec76c87ec310a0da34db6a6d201ad..18352757dd83cdef71facb170639956f72f74f52 100644 --- a/python/paddle/fluid/core.py +++ b/python/paddle/fluid/core.py @@ -277,6 +277,7 @@ try: from .libpaddle import _get_current_stream from .libpaddle import _Profiler, _ProfilerResult, _RecordEvent from .libpaddle import _set_current_stream + from .libpaddle import _get_phi_kernel_name if sys.platform != 'win32': from .libpaddle import _set_process_pids from .libpaddle import _erase_process_pids diff --git a/python/paddle/fluid/inference/__init__.py b/python/paddle/fluid/inference/__init__.py index e5e39054e6ff45553b821d26a31976fa35eb4f6d..d6b8b102487923d84b90bafba8ce0bd52f09b6b3 100644 --- a/python/paddle/fluid/inference/__init__.py +++ b/python/paddle/fluid/inference/__init__.py @@ -15,4 +15,4 @@ from .wrapper import Config, DataType, PlaceType, 
PrecisionType, Tensor, Predictor from .wrapper import convert_to_mixed_precision -from ..core import create_predictor, get_version, get_num_bytes_of_data_type, PredictorPool, get_trt_compile_version, get_trt_runtime_version +from ..core import create_predictor, get_version, _get_phi_kernel_name, get_num_bytes_of_data_type, PredictorPool, get_trt_compile_version, get_trt_runtime_version diff --git a/python/paddle/inference/__init__.py b/python/paddle/inference/__init__.py index 4b066eb16a5374f412255cc148517865655a130b..1b9a96f437bbd2f9fae49fe28c3c4d40bd48cff8 100644 --- a/python/paddle/inference/__init__.py +++ b/python/paddle/inference/__init__.py @@ -20,6 +20,7 @@ from ..fluid.inference import Tensor # noqa: F401 from ..fluid.inference import Predictor # noqa: F401 from ..fluid.inference import create_predictor # noqa: F401 from ..fluid.inference import get_version # noqa: F401 +from ..fluid.inference import _get_phi_kernel_name # noqa: F401 from ..fluid.inference import get_trt_compile_version # noqa: F401 from ..fluid.inference import get_trt_runtime_version # noqa: F401 from ..fluid.inference import convert_to_mixed_precision # noqa: F401 @@ -28,7 +29,7 @@ from ..fluid.inference import PredictorPool # noqa: F401 __all__ = [ # noqa 'Config', 'DataType', 'PlaceType', 'PrecisionType', 'Tensor', 'Predictor', - 'create_predictor', 'get_version', 'get_trt_compile_version', - 'convert_to_mixed_precision', 'get_trt_runtime_version', - 'get_num_bytes_of_data_type', 'PredictorPool' + 'create_predictor', 'get_version', '_get_phi_kernel_name', + 'get_trt_compile_version', 'convert_to_mixed_precision', + 'get_trt_runtime_version', 'get_num_bytes_of_data_type', 'PredictorPool' ]