Unverified commit 6529a394, authored by wanghuancoder, committed via GitHub

InterpreterCore run (#33640)

* runtimecontext

* ExecutionContextV2

* refine

* refine

* pass test

* fix bug

* fix bug

* InterpreterCore run
Parent commit: 52f50def
...@@ -40,6 +40,7 @@ limitations under the License. */ ...@@ -40,6 +40,7 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/new_exec.h"
#include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/framework/op_version_registry.h"
...@@ -53,7 +54,6 @@ limitations under the License. */ ...@@ -53,7 +54,6 @@ limitations under the License. */
#include "paddle/fluid/framework/trainer.h" #include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/type_defs.h" #include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/version.h" #include "paddle/fluid/framework/version.h"
#include "paddle/fluid/framework/new_exec.h"
#include "paddle/fluid/imperative/layer.h" #include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h" #include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/mmap_allocator.h" #include "paddle/fluid/memory/allocation/mmap_allocator.h"
...@@ -90,7 +90,6 @@ limitations under the License. */ ...@@ -90,7 +90,6 @@ limitations under the License. */
#include "paddle/fluid/pybind/ps_gpu_wrapper_py.h" #include "paddle/fluid/pybind/ps_gpu_wrapper_py.h"
#include "paddle/fluid/pybind/pybind_boost_headers.h" #include "paddle/fluid/pybind/pybind_boost_headers.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/pybind/nccl_wrapper_py.h" #include "paddle/fluid/pybind/nccl_wrapper_py.h"
#endif #endif
...@@ -1897,42 +1896,64 @@ All parameter, weight, gradient are variables in Paddle. ...@@ -1897,42 +1896,64 @@ All parameter, weight, gradient are variables in Paddle.
}); });
py::class_<framework::InterpreterCore>(m, "InterpreterCore") py::class_<framework::InterpreterCore>(m, "InterpreterCore")
.def(py::init<const platform::Place &, const ProgramDesc &, const ProgramDesc &>()) .def(py::init<const platform::Place &, const ProgramDesc &,
.def("run", [](InterpreterCore &self, const std::unordered_map<std::string, py::array>& input_dict, std::vector<std::string> vec_fetch_name) { const ProgramDesc &>())
.def("run",
[](InterpreterCore &self,
const std::unordered_map<std::string, py::array> &input_dict,
std::vector<std::string> vec_fetch_name) {
pybind11::gil_scoped_release release; pybind11::gil_scoped_release release;
std::vector<framework::Tensor> vec_tensor; std::vector<framework::Tensor> vec_tensor;
std::vector<std::string> vec_name; std::vector<std::string> vec_name;
//vec_tensor.reserve( feed.size() ); // vec_tensor.reserve( feed.size() );
//vec_tensor.reserve( feed.size ()) ; // vec_tensor.reserve( feed.size ()) ;
//auto new_res = input_dict.cast<py::array>(); // auto new_res = input_dict.cast<py::array>();
for ( auto & item : input_dict ) for (auto &item : input_dict) {
{ // cerr << "test flag " << test_flag << endl;
//cerr << "test flag " << test_flag << endl; // cerr << item.first << endl;
//cerr << item.first << endl;
framework::LoDTensor t; framework::LoDTensor t;
SetTensorFromPyArray<platform::CPUPlace>(&t, item.second, SetTensorFromPyArray<platform::CPUPlace>(
platform::CPUPlace(), false); &t, item.second, platform::CPUPlace(), false);
//cerr << t.dims() << endl; // cerr << t.dims() << endl;
//cerr << t.data<float>()[0] << endl; // cerr << t.data<float>()[0] << endl;
vec_name.push_back( item.first ); vec_name.push_back(item.first);
vec_tensor.push_back( t ); vec_tensor.push_back(t);
} }
// std::cerr << "11" << std::endl;
std::vector<framework::Tensor> vec_out;
self.run(vec_name, vec_tensor, vec_fetch_name, vec_out);
// self.Run(prog, scope, block_id, create_local_scope, create_vars,
// fetch_vars);
std::vector<py::array> vec_ret;
for (size_t i = 0; i < vec_out.size(); ++i) {
vec_ret.push_back(TensorToPyArray(vec_out[i], true));
}
return vec_ret;
})
.def("run",
[](InterpreterCore &self,
const std::unordered_map<std::string, LoDTensor> &input_dict,
std::vector<std::string> vec_fetch_name) {
pybind11::gil_scoped_release release;
std::vector<framework::Tensor> vec_tensor;
std::vector<std::string> vec_name;
for (auto &item : input_dict) {
vec_name.push_back(item.first);
vec_tensor.push_back(item.second);
}
//std::cerr << "11" << std::endl;
std::vector<framework::Tensor> vec_out; std::vector<framework::Tensor> vec_out;
self.run(vec_name, vec_tensor, vec_fetch_name, vec_out); self.run(vec_name, vec_tensor, vec_fetch_name, vec_out);
//self.Run(prog, scope, block_id, create_local_scope, create_vars,
// fetch_vars); std::vector<py::array> vec_ret;
std::vector< py::array> vec_ret; for (size_t i = 0; i < vec_out.size(); ++i) {
for( size_t i = 0; i < vec_out.size(); ++i ) vec_ret.push_back(TensorToPyArray(vec_out[i], true));
{
vec_ret.push_back( TensorToPyArray(vec_out[i], true) ) ;
} }
return vec_ret; return vec_ret;
}); });
......
Markdown is supported.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.