diff --git a/paddle/fluid/framework/new_exec.h b/paddle/fluid/framework/new_exec.h
index 39be163eb1eb494dc00e044b8d70c1a6cb89684a..015d5b7bac7e8a6d999fcca23bc790bd26c4c495 100644
--- a/paddle/fluid/framework/new_exec.h
+++ b/paddle/fluid/framework/new_exec.h
@@ -539,9 +539,9 @@ void build_op_func_list( const framework::ProgramDesc& pdesc, std::vector<OperatorBase*>& op_list,
 
-    cerr << op->Type() << endl;
+    //cerr << op->Type() << endl;
 
     //bool debug = op->Type() == "softmax_with_cross_entropy_grad";
-    bool debug = true;
+    bool debug = false;
     //cerr << "create op" << endl;
     //auto op_base_u = OpRegistry::CreateOp(*op);
@@ -638,16 +638,20 @@ void build_op_func_list( const framework::ProgramDesc& pdesc, std::vector<OperatorBase*>& op_list,
         auto tensor_in = static_cast<const Tensor*>(&(var->Get<LoDTensor>()));
-        cerr << "i " << i << "\t" << tensor_in->IsInitialized() << endl;
+        if( !tensor_in->IsInitialized() )
+        {
+          continue;
+        }
+        //cerr << "i " << i << "\t" << tensor_in->IsInitialized() << endl;
         auto kernel_type_for_var =
             static_cast<const framework::OperatorWithKernel*>(op_base)->GetKernelTypeForVar(
                 var_name_item.first, *tensor_in, expected_kernel_key);
         if( debug)
@@ -669,7 +673,7 @@ void build_op_func_list( const framework::ProgramDesc& pdesc, std::vector<OperatorBase*>& op_list,
         var_scope->var_list.push_back(std::unique_ptr<Variable>(v));
 
         VariableNameMap copy_in_map;
-        cerr << "ints name is " << input_names[var_name_item.first][i] << endl;
+        //cerr << "ints name is " << input_names[var_name_item.first][i] << endl;
         copy_in_map["X"] = { input_names[var_name_item.first][i] };
         VariableNameMap copy_out_map;
         copy_out_map["Out"] = { new_var_name };
@@ -779,7 +783,7 @@ void exec_op_func_list( const std::vector<OpFuncNode>& vec_func_list,
       input_vars.reserve(var_name_item.second.size());
       for (auto& id : var_name_item.second) {
-        cerr << var_name_item.first << "\t " << id << endl;
+        //cerr << var_name_item.first << "\t " << id << endl;
         input_vars.emplace_back( var_scope.var_list[ id ].get() );
       }
       ins_map.emplace( var_name_item.first, std::move(input_vars) );
@@ -792,7 +796,7 @@ void exec_op_func_list( const std::vector<OpFuncNode>& vec_func_list,
       out_vars.reserve(var_name_item.second.size());
      for (auto& id : var_name_item.second) {
-        cerr << var_name_item.first << "\t " << id << endl;
+        //cerr << var_name_item.first << "\t " << id << endl;
         out_vars.emplace_back( var_scope.var_list[ id ].get());
       }
       outs_map.emplace( var_name_item.first, std::move( out_vars ) );
@@ -826,15 +830,23 @@ void exec_op_func_list( const std::vector<OpFuncNode>& vec_func_list,
 class InterpreterCore {
 public:
-  InterpreterCore( const platform::Place& place, const ProgramDesc& prog ) : place_(place), prog_(prog) {
+  InterpreterCore( const platform::Place& place, const ProgramDesc& prog, const ProgramDesc& startup_prog) : place_(place), prog_(prog) {
     paddle::framework::InitDevices();
 
     is_build = false;
+    paddle::framework::build_variable_scope( startup_prog, &global_scope );
+
+
+    std::vector<paddle::framework::OpFuncNode> vec_func_list;
+    std::vector< paddle::framework::OperatorBase* > op_list;
+    paddle::framework::build_op_func_list( startup_prog, op_list, vec_func_list, &global_scope, place_);
+
   }
 
-  void run( const std::vector<std::string> vec_name, const std::vector<framework::Tensor>& vec_tensor, const vector<std::string>& vec_fetch_name)
+  void run( const std::vector<std::string> vec_name, const std::vector<framework::Tensor>& vec_tensor, const vector<std::string>& vec_fetch_name,
+            std::vector<framework::Tensor>& vec_out)
   {
-    cerr << "run" << endl;
+    //cerr << "run" << endl;
     // set static data
     if( is_build == false )
     {
@@ -843,13 +855,13 @@ public:
       for ( size_t i = 0; i < vec_name.size(); ++i )
       {
         auto it = global_scope.name2id.find( vec_name[i] );
-        cerr << "find " << ( it != global_scope.name2id.end() ) << endl;
+        //cerr << "find " << ( it != global_scope.name2id.end() ) << endl;
 
         auto feed_tensor = global_scope.var_list[it->second]->GetMutable<framework::LoDTensor>();
-        cerr << " get tensor" << endl;
+        //cerr << " get tensor" << endl;
         feed_tensor->ShareDataWith( vec_tensor[i] );
-        cerr << "share buffer with" << endl;
+        //cerr << "share buffer with" << endl;
       }
 
       if( is_build == false )
@@ -873,14 +885,16 @@ public:
       //cerr << "out " << fetch_tensor->data<float>()[0] << endl;
       if ( platform::is_gpu_place(fetch_tensor->place() ) )
       {
-        cerr << "fetch gpu" << endl;
+        //cerr << "fetch gpu" << endl;
         Tensor out;
         platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
         auto* dev_ctx = pool.Get(place_);
         dev_ctx->Wait();
         TensorCopySync(*fetch_tensor, platform::CPUPlace(), &out);
         dev_ctx->Wait();
-        cerr << "out " << out << endl;
+        //cerr << "out " << out << endl;
+        //cout << out.data<float>()[0] << endl;
+        vec_out.push_back( out );
       }
       else
       {
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index f864aa031f22782ae5354584e930899fbd9c93a8..67c9b5df5ca74bc666ff90e0e6e939b8c13949df 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1980,7 +1980,7 @@ All parameter, weight, gradient are variables in Paddle.
       });
 
   py::class_<InterpreterCore>(m, "InterpreterCore")
-      .def(py::init<const platform::Place &, const ProgramDesc &>())
+      .def(py::init<const platform::Place &, const ProgramDesc &, const ProgramDesc &>())
      .def("run", [](InterpreterCore &self, const std::unordered_map<std::string, py::array>& input_dict, std::vector<std::string> vec_fetch_name) {
        pybind11::gil_scoped_release release;
        std::vector<framework::Tensor> vec_tensor;
@@ -1993,13 +1993,13 @@ All parameter, weight, gradient are variables in Paddle.
        for ( auto & item : input_dict )
        {
          //cerr << "test flag " << test_flag << endl;
-          cerr << item.first << endl;
+          //cerr << item.first << endl;
          framework::LoDTensor t;
 
          SetTensorFromPyArray(&t, item.second, platform::CPUPlace(), false);
-          cerr << t.dims() << endl;
-          cerr << t.data<float>()[0] << endl;
+          //cerr << t.dims() << endl;
+          //cerr << t.data<float>()[0] << endl;
          vec_name.push_back( item.first );
          vec_tensor.push_back( t );
@@ -2007,10 +2007,17 @@ All parameter, weight, gradient are variables in Paddle.
 
-        std::cerr << "11" << std::endl;
-        self.run(vec_name, vec_tensor, vec_fetch_name);
+        //std::cerr << "11" << std::endl;
+        std::vector<framework::Tensor> vec_out;
+        self.run(vec_name, vec_tensor, vec_fetch_name, vec_out);
        //self.Run(prog, scope, block_id, create_local_scope, create_vars,
        //         fetch_vars);
+        std::vector< py::array> vec_ret;
+        for( size_t i = 0; i < vec_out.size(); ++i )
+        {
+          vec_ret.push_back( TensorToPyArray(vec_out[i], true) ) ;
+        }
+        return vec_ret;
      });
 
  m.def("init_gflags", framework::InitGflags);
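
Not part of the patch: a minimal C++ sketch of how the reworked InterpreterCore is intended to be driven after this change, assuming the signatures reconstructed above. The startup program is now executed in the constructor, and fetch results come back through the new vec_out parameter instead of stderr. The wrapper function, the place choice, and the feed/fetch names "x" and "loss" are assumptions for illustration only.

// sketch.cc -- illustrative usage only, not from the diff
#include <string>
#include <vector>
#include "paddle/fluid/framework/new_exec.h"

void RunOnce(const paddle::framework::ProgramDesc& startup_prog,
             const paddle::framework::ProgramDesc& main_prog,
             const paddle::framework::Tensor& x_tensor) {
  paddle::platform::CUDAPlace place(0);  // assumed device

  // The constructor now takes the startup program and runs
  // build_variable_scope + build_op_func_list on it up front.
  paddle::framework::InterpreterCore core(place, main_prog, startup_prog);

  // Feed "x", fetch "loss"; fetched tensors are appended to fetch_out
  // (copied to CPU when the fetch variable lives on the GPU).
  std::vector<paddle::framework::Tensor> fetch_out;
  core.run({"x"}, {x_tensor}, {"loss"}, fetch_out);
}

On the Python side, the updated pybind binding mirrors this: run now returns a list of numpy arrays (one per fetch name) built via TensorToPyArray, rather than returning nothing.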