提交 1b7fe5e8 编写于 作者: P phlrain

add data transfer; test=develop

上级 96a39cc1
......@@ -394,7 +394,7 @@ cc_library(custom_operator SRCS custom_operator.cc DEPS tensor attribute framewo
# Unit test for the custom-tensor C++ API.
cc_test(custom_tensor_test SRCS custom_tensor_test.cc DEPS custom_tensor glog)
# Retired prototype executor binary, kept for reference.
#cc_binary(test_executor SRCS test_executor.cc DEPS executor op_registry ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} )
# Standalone binary driving the new interpreter-core executor (new_exec.cc).
cc_binary(new_executor SRCS new_exec.cc DEPS operator op_registry executor ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} profiler)
# NOTE(review): same target name as above built from new_exec_test.cc — only one
# of the two cc_binary(new_executor ...) lines may be active at a time.
#cc_binary(new_executor SRCS new_exec_test.cc DEPS operator op_registry executor ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} profiler)
# Modules linked into the fluid framework shared library.
set(FLUID_FRAMEWORK_MODULES proto_desc memory lod_tensor executor data_feed_proto layer dynamic_loader custom_operator)
......
此差异已折叠。
......@@ -28,6 +28,6 @@ endif(NOT WIN32)
# Accumulates gradients across ops in imperative (dygraph) mode.
cc_library(gradient_accumulator SRCS gradient_accumulator.cc DEPS blas operator lod_tensor selected_rows selected_rows_functor var_type_traits layer math_function)
# NOTE(review): the next two lines declare the identical tracer_test target,
# one active and one commented out — the commented duplicate looks like a
# leftover from toggling the target on; confirm and drop one of them.
cc_binary(tracer_test SRCS tracer_test.cc DEPS tracer layer op_registry python pybind ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} profiler )
#cc_binary(tracer_test SRCS tracer_test.cc DEPS tracer layer op_registry python pybind ${GLOB_OP_LIB} ${GLOB_OPERATOR_DEPS} profiler )
add_subdirectory(tests)
......@@ -76,7 +76,6 @@ class ElementwiseOp : public framework::OperatorWithKernel {
"its type to LOD_TENSOR.",
ctx->GetInputsVarType("X").front()));
}
if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
ctx->ShareDim("X", /*->*/ "Out");
ctx->ShareLoD("X", /*->*/ "Out");
......
......@@ -53,6 +53,7 @@ limitations under the License. */
#include "paddle/fluid/framework/trainer.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/framework/version.h"
#include "paddle/fluid/framework/new_exec.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/mmap_allocator.h"
......@@ -88,6 +89,7 @@ limitations under the License. */
#include "paddle/fluid/pybind/ps_gpu_wrapper_py.h"
#include "paddle/fluid/pybind/pybind_boost_headers.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/pybind/nccl_wrapper_py.h"
#endif
......@@ -1977,6 +1979,40 @@ All parameter, weight, gradient are variables in Paddle.
fetch_vars);
});
// Python binding for the new standalone executor core.
//   InterpreterCore(place, program)      -- construct from a Place + ProgramDesc
//   InterpreterCore.run(feed_dict, fetch_names)
//     feed_dict:   {var_name: numpy.ndarray} copied into CPU LoDTensors
//     fetch_names: list of variable names to fetch
py::class_<framework::InterpreterCore>(m, "InterpreterCore")
    .def(py::init<const platform::Place &, const ProgramDesc &>())
    .def("run",
         [](InterpreterCore &self,
            const std::unordered_map<std::string, py::array> &input_dict,
            std::vector<std::string> vec_fetch_name) {
           std::vector<framework::Tensor> vec_tensor;
           std::vector<std::string> vec_name;
           vec_tensor.reserve(input_dict.size());
           vec_name.reserve(input_dict.size());
           // Convert the numpy feeds while the GIL is still held:
           // SetTensorFromPyArray reads Python buffer objects, which is
           // not safe after gil_scoped_release.
           for (const auto &item : input_dict) {
             framework::LoDTensor t;
             SetTensorFromPyArray<platform::CPUPlace>(
                 &t, item.second, platform::CPUPlace(), false);
             vec_name.push_back(item.first);
             vec_tensor.push_back(std::move(t));
           }
           // Drop the GIL only for the (potentially long) execution itself
           // so other Python threads can make progress.
           pybind11::gil_scoped_release release;
           self.run(vec_name, vec_tensor, vec_fetch_name);
         });
m.def("init_gflags", framework::InitGflags);
m.def("init_glog", framework::InitGLOG);
m.def("load_op_meta_info_and_register_op",
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册