From f5e73f4c7e526e10ec8efe4afc4487b8f60e743d Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Tue, 3 Oct 2017 23:29:03 +0000
Subject: [PATCH] pass simple elementwise_add op

---
 paddle/framework/executor.cc      | 36 ++++++++----------
 paddle/framework/executor_test.cc | 63 +++++++++++++++++++++----------
 2 files changed, 58 insertions(+), 41 deletions(-)

diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index 94b9b3b350..da387b47ba 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -13,8 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/framework/executor.h"
+#include <iostream>
 #include <memory>
 #include <vector>
+#include "paddle/framework/lod_tensor.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/scope.h"
 
@@ -30,41 +32,33 @@ Executor::Executor(const std::vector<platform::Place>& places) {
 
 void Executor::Run(const ProgramDesc& pdesc, Scope* scope,
                    std::vector<Tensor>* outputs) {
-  // operators running
   // TODO(tonyyang-svail):
   //   - only runs the first block
   //   - only runs on the first device
+  //   - test on gpu
   auto& block = pdesc.blocks(0);
   auto& device = devices_[0];
 
+  // TODO(tonyyang-svail):
+  //   - runs on a new local scope
+  // Scope& local_scope = scope->NewScope();
+
   for (auto& var : block.vars()) {
     scope->NewVar(var.name());
  }
 
-  // std::vector ops;
   for (auto& op_desc : block.ops()) {
-    auto op = framework::OpRegistry::CreateOp(op_desc);
-    // op->InferShape(*scope);
+    auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
     op->Run(*scope, *device->cpu_device_context);
   }
 
-  // TODO(tonyyang-svail): need to test gpu device
-  // device_->cpu_device_context->Wait();
-  // #ifndef PADDLE_ONLY_CPU
-  //   if (device_->cuda_device_context) {
-  //     device_->cuda_device_context->Wait();
-  //   }
-  // #endif
-
-  Scope& local_scope = scope->NewScope();
-  local_scope.NewVar();
-  for (auto device : devices_) {
-    device->cpu_device_context->Wait();
-#ifndef PADDLE_ONLY_CPU
-    if (device->cuda_device_context) {
-      device->cuda_device_context->Wait();
-    }
-#endif
+  // print tensor value
+  for (auto& var : block.vars()) {
+    std::cout << var.name() << std::endl;
+    auto v = scope->FindVar(var.name());
+    const LoDTensor& t = v->Get<LoDTensor>();
+    for (int i = 0; i < t.numel(); ++i) std::cout << t.data<float>()[i] << " ";
+    std::cout << std::endl;
   }
 }
 
diff --git a/paddle/framework/executor_test.cc b/paddle/framework/executor_test.cc
index 11255af808..300de36b87 100644
--- a/paddle/framework/executor_test.cc
+++ b/paddle/framework/executor_test.cc
@@ -16,16 +16,49 @@ limitations under the License.
 */
 
 #include "gtest/gtest.h"
 #include "paddle/framework/attribute.h"
-#include <vector>
 #include "paddle/framework/grad_op_builder.h"
 #include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 
+#include <vector>
+
 USE_OP(elementwise_add);
+USE_OP(gaussian_random);
 
 using namespace paddle::platform;
 using namespace paddle::framework;
 
+typedef paddle::framework::BlockDesc proto_block;
+typedef paddle::framework::OpDesc proto_op;
+
+using std::string;
+
+void add_gaussian_random_op(string var_name, proto_block* block) {
+  std::vector<int> dim{2, 3};
+
+  // insert variable
+  auto a = block->add_vars();
+  a->set_name(var_name);
+  auto a_lt = a->mutable_lod_tensor();
+  a_lt->set_data_type(paddle::framework::DataType::FP32);
+  for (int i : dim) {
+    a_lt->add_dims(i);
+  }
+
+  // insert operation
+  auto op = block->add_ops();
+  op->set_type("gaussian_random");
+  auto dims = op->add_attrs();
+  dims->set_name("dims");
+  dims->set_type(paddle::framework::AttrType::INTS);
+  for (int i : dim) {
+    dims->add_ints(i);
+  }
+  auto Out = op->add_outputs();
+  Out->set_parameter("Out");
+  Out->add_arguments(var_name);
+}
+
 TEST(Executor, Init) {
   ProgramDesc pdesc;
 
@@ -33,35 +66,25 @@ TEST(Executor, Init) {
   auto root_block = pdesc.add_blocks();
   root_block->set_idx(0);
   root_block->set_parent_idx(-1);
 
-  auto a = root_block->add_vars();
-  a->set_name("a");
-  auto a_lt = a->mutable_lod_tensor();
-  a_lt->set_data_type(paddle::framework::DataType::FP32);
-  a_lt->add_dims(640);
-  a_lt->add_dims(640);
-
-  auto b = root_block->add_vars();
-  b->set_name("b");
-  auto b_lt = b->mutable_lod_tensor();
-  b_lt->set_data_type(paddle::framework::DataType::FP32);
-  b_lt->add_dims(640);
-  b_lt->add_dims(640);
+  add_gaussian_random_op("a", root_block);
+  add_gaussian_random_op("b", root_block);
 
   auto c = root_block->add_vars();
   c->set_name("c");
   auto c_lt = c->mutable_lod_tensor();
   c_lt->set_data_type(paddle::framework::DataType::FP32);
-  c_lt->add_dims(640);
-  c_lt->add_dims(640);
-  auto op1 = root_block->add_ops();
-  op1->set_type("elementwise_add");
-  auto X = op1->add_inputs();
+  auto op = root_block->add_ops();
+  op->set_type("elementwise_add");
+  auto X = op->add_inputs();
   X->set_parameter("X");
   X->add_arguments("a");
-  auto Y = op1->add_inputs();
+  auto Y = op->add_inputs();
   Y->set_parameter("Y");
   Y->add_arguments("b");
+  auto Out = op->add_outputs();
+  Out->set_parameter("Out");
+  Out->add_arguments("c");
 
   CPUPlace cpu_place1, cpu_place2;
   std::vector<Place> places;
-- 
GitLab