未验证 提交 65d98752 编写于 作者: F flame 提交者: GitHub

python inference enable_memory_optim (#18817)

python inference API supports enable_memory_optim
上级 fd3b666d
...@@ -37,19 +37,21 @@ using paddle::NativeConfig; ...@@ -37,19 +37,21 @@ using paddle::NativeConfig;
using paddle::NativePaddlePredictor; using paddle::NativePaddlePredictor;
using paddle::AnalysisPredictor; using paddle::AnalysisPredictor;
static void BindPaddleDType(py::module *m); namespace {
static void BindPaddleBuf(py::module *m); void BindPaddleDType(py::module *m);
static void BindPaddleTensor(py::module *m); void BindPaddleBuf(py::module *m);
static void BindPaddlePlace(py::module *m); void BindPaddleTensor(py::module *m);
static void BindPaddlePredictor(py::module *m); void BindPaddlePlace(py::module *m);
static void BindNativeConfig(py::module *m); void BindPaddlePredictor(py::module *m);
static void BindNativePredictor(py::module *m); void BindNativeConfig(py::module *m);
static void BindAnalysisConfig(py::module *m); void BindNativePredictor(py::module *m);
static void BindAnalysisPredictor(py::module *m); void BindAnalysisConfig(py::module *m);
void BindAnalysisPredictor(py::module *m);
#ifdef PADDLE_WITH_MKLDNN #ifdef PADDLE_WITH_MKLDNN
static void BindMkldnnQuantizerConfig(py::module *m); void BindMkldnnQuantizerConfig(py::module *m);
#endif #endif
} // namespace
void BindInferenceApi(py::module *m) { void BindInferenceApi(py::module *m) {
BindPaddleDType(m); BindPaddleDType(m);
...@@ -71,6 +73,7 @@ void BindInferenceApi(py::module *m) { ...@@ -71,6 +73,7 @@ void BindInferenceApi(py::module *m) {
m->def("paddle_dtype_size", &paddle::PaddleDtypeSize); m->def("paddle_dtype_size", &paddle::PaddleDtypeSize);
} }
namespace {
void BindPaddleDType(py::module *m) { void BindPaddleDType(py::module *m) {
py::enum_<PaddleDType>(*m, "PaddleDType") py::enum_<PaddleDType>(*m, "PaddleDType")
.value("FLOAT32", PaddleDType::FLOAT32) .value("FLOAT32", PaddleDType::FLOAT32)
...@@ -227,6 +230,8 @@ void BindAnalysisConfig(py::module *m) { ...@@ -227,6 +230,8 @@ void BindAnalysisConfig(py::module *m) {
.def("switch_ir_optim", &AnalysisConfig::SwitchIrOptim, .def("switch_ir_optim", &AnalysisConfig::SwitchIrOptim,
py::arg("x") = true) py::arg("x") = true)
.def("ir_optim", &AnalysisConfig::ir_optim) .def("ir_optim", &AnalysisConfig::ir_optim)
.def("enable_memory_optim", &AnalysisConfig::EnableMemoryOptim)
.def("set_optim_cache_dir", &AnalysisConfig::SetOptimCacheDir)
.def("switch_use_feed_fetch_ops", &AnalysisConfig::SwitchUseFeedFetchOps, .def("switch_use_feed_fetch_ops", &AnalysisConfig::SwitchUseFeedFetchOps,
py::arg("x") = true) py::arg("x") = true)
.def("use_feed_fetch_ops_enabled", .def("use_feed_fetch_ops_enabled",
...@@ -312,6 +317,6 @@ void BindAnalysisPredictor(py::module *m) { ...@@ -312,6 +317,6 @@ void BindAnalysisPredictor(py::module *m) {
.def("SaveOptimModel", &AnalysisPredictor::SaveOptimModel, .def("SaveOptimModel", &AnalysisPredictor::SaveOptimModel,
py::arg("dir")); py::arg("dir"));
} }
} // namespace
} // namespace pybind } // namespace pybind
} // namespace paddle } // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册