未验证 提交 6529a394 编写于 作者: W wanghuancoder 提交者: GitHub

InterpreterCore run (#33640)

* runtimecontext

* ExecutionContextV2

* refine

* refine

* pass test

* fix bug

* fix bug

* InterpreterCore run
上级 52f50def
...@@ -40,6 +40,7 @@ limitations under the License. */ ...@@ -40,6 +40,7 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/new_exec.h"
#include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/op_version_registry.h"
...@@ -53,7 +54,6 @@ limitations under the License. */ ...@@ -53,7 +54,6 @@ limitations under the License. */
#include "paddle/fluid/framework/trainer.h" #include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/type_defs.h" #include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/version.h" #include "paddle/fluid/framework/version.h"
#include "paddle/fluid/framework/new_exec.h"
#include "paddle/fluid/imperative/layer.h" #include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h" #include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/mmap_allocator.h" #include "paddle/fluid/memory/allocation/mmap_allocator.h"
...@@ -90,7 +90,6 @@ limitations under the License. */ ...@@ -90,7 +90,6 @@ limitations under the License. */
#include "paddle/fluid/pybind/ps_gpu_wrapper_py.h" #include "paddle/fluid/pybind/ps_gpu_wrapper_py.h"
#include "paddle/fluid/pybind/pybind_boost_headers.h" #include "paddle/fluid/pybind/pybind_boost_headers.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/pybind/nccl_wrapper_py.h" #include "paddle/fluid/pybind/nccl_wrapper_py.h"
#endif #endif
...@@ -1897,45 +1896,67 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -1897,45 +1896,67 @@ All parameter, weight, gradient are variables in Paddle.
}); });
// Python binding for the new-executor InterpreterCore.
// Construction: InterpreterCore(place, main_program, startup_program).
// Two "run" overloads are exposed: one feeding numpy arrays, one feeding
// already-built LoDTensors. Both return the fetched outputs as numpy arrays.
py::class_<framework::InterpreterCore>(m, "InterpreterCore")
    .def(py::init<const platform::Place &, const ProgramDesc &,
                  const ProgramDesc &>())
    .def("run",
         [](InterpreterCore &self,
            const std::unordered_map<std::string, py::array> &input_dict,
            const std::vector<std::string> &vec_fetch_name) {
           std::vector<framework::Tensor> vec_tensor;
           std::vector<std::string> vec_name;
           vec_name.reserve(input_dict.size());
           vec_tensor.reserve(input_dict.size());
           // Convert each numpy array to a CPU LoDTensor. This touches
           // Python buffers, so it must run while the GIL is still held.
           for (const auto &item : input_dict) {
             framework::LoDTensor t;
             SetTensorFromPyArray<platform::CPUPlace>(
                 &t, item.second, platform::CPUPlace(), false);
             vec_name.push_back(item.first);
             vec_tensor.push_back(std::move(t));
           }
           std::vector<framework::Tensor> vec_out;
           {
             // Release the GIL only around the pure-C++ execution; the
             // conversions before/after this scope require the GIL.
             pybind11::gil_scoped_release release;
             self.run(vec_name, vec_tensor, vec_fetch_name, vec_out);
           }
           // Wrap fetched tensors back into numpy arrays (needs the GIL,
           // which gil_scoped_release has re-acquired on scope exit).
           std::vector<py::array> vec_ret;
           vec_ret.reserve(vec_out.size());
           for (auto &out : vec_out) {
             vec_ret.push_back(TensorToPyArray(out, true));
           }
           return vec_ret;
         })
    .def("run",
         [](InterpreterCore &self,
            const std::unordered_map<std::string, LoDTensor> &input_dict,
            const std::vector<std::string> &vec_fetch_name) {
           std::vector<framework::Tensor> vec_tensor;
           std::vector<std::string> vec_name;
           vec_name.reserve(input_dict.size());
           vec_tensor.reserve(input_dict.size());
           for (const auto &item : input_dict) {
             vec_name.push_back(item.first);
             // Tensor copies share the underlying buffer, so this is cheap.
             vec_tensor.push_back(item.second);
           }
           std::vector<framework::Tensor> vec_out;
           {
             // See the numpy overload above: only the executor call itself
             // is safe to run without the GIL.
             pybind11::gil_scoped_release release;
             self.run(vec_name, vec_tensor, vec_fetch_name, vec_out);
           }
           std::vector<py::array> vec_ret;
           vec_ret.reserve(vec_out.size());
           for (auto &out : vec_out) {
             vec_ret.push_back(TensorToPyArray(out, true));
           }
           return vec_ret;
         });
m.def("init_gflags", framework::InitGflags); m.def("init_gflags", framework::InitGflags);
m.def("init_glog", framework::InitGLOG); m.def("init_glog", framework::InitGLOG);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册