From a528a9717ec5880f271b9d216cb5532cee9d4504 Mon Sep 17 00:00:00 2001
From: Yang Yang
Date: Tue, 10 Oct 2017 21:32:03 +0000
Subject: [PATCH] remove prune as member function to function

---
 paddle/framework/executor.cc | 120 +++++++++++++++++------------------
 paddle/framework/executor.h  |  23 ++++---
 2 files changed, 71 insertions(+), 72 deletions(-)

diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc
index def1d1fd06..3c35102ff9 100644
--- a/paddle/framework/executor.cc
+++ b/paddle/framework/executor.cc
@@ -32,66 +32,7 @@ namespace framework {
 const std::string kFeedOpType = "feed";
 const std::string kFetchOpType = "fetch";
 
-Executor::Executor(const std::vector<platform::Place>& places) {
-  PADDLE_ENFORCE_GT(places.size(), 0);
-  device_contexts_.resize(places.size());
-  for (size_t i = 0; i < places.size(); i++) {
-    if (platform::is_cpu_place(places[i])) {
-      device_contexts_[i] = new platform::CPUDeviceContext(
-          boost::get<platform::CPUPlace>(places[i]));
-    } else if (platform::is_gpu_place(places[i])) {
-#ifdef PADDLE_WITH_CUDA
-      device_contexts_[i] = new platform::CUDADeviceContext(
-          boost::get<platform::GPUPlace>(places[i]));
-#else
-      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
-#endif
-    }
-  }
-}
-
-Executor::~Executor() {
-  for (auto& device_context : device_contexts_) {
-    delete device_context;
-  }
-}
-
-void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
-  // TODO(tonyyang-svail):
-  //   - only runs on the first device (i.e. no interdevice communication)
-  //   - will change to use multiple blocks for RNN op and Cond Op
-  PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id);
-  auto& block = pdesc.blocks(block_id);
-  auto& device = device_contexts_[0];
-
-  // Instantiate all the vars in the global scope
-  for (auto& var : block.vars()) {
-    scope->NewVar(var.name());
-  }
-
-  Scope& local_scope = scope->NewScope();
-
-  std::vector<bool> should_run = Prune(pdesc, block_id);
-  PADDLE_ENFORCE_EQ(should_run.size(), static_cast<size_t>(block.ops_size()));
-  for (size_t i = 0; i < should_run.size(); ++i) {
-    if (should_run[i]) {
-      for (auto& var : block.ops(i).outputs()) {
-        for (auto& argu : var.arguments()) {
-          if (local_scope.FindVar(argu) == nullptr) {
-            local_scope.NewVar(argu);
-          }
-        }
-      }
-      auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i));
-      op->Run(local_scope, *device);
-    }
-  }
-
-  // TODO(tonyyang-svail):
-  //   - Destroy local_scope
-}
-
-std::vector<bool> Executor::Prune(const ProgramDesc& pdesc, int block_id) {
+std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id) {
   // TODO(tonyyang-svail):
   //   - will change to use multiple blocks for RNN op and Cond Op
 
@@ -159,5 +100,64 @@ std::vector<bool> Executor::Prune(const ProgramDesc& pdesc, int block_id) {
   return should_run;
 }
 
+Executor::Executor(const std::vector<platform::Place>& places) {
+  PADDLE_ENFORCE_GT(places.size(), 0);
+  device_contexts_.resize(places.size());
+  for (size_t i = 0; i < places.size(); i++) {
+    if (platform::is_cpu_place(places[i])) {
+      device_contexts_[i] = new platform::CPUDeviceContext(
+          boost::get<platform::CPUPlace>(places[i]));
+    } else if (platform::is_gpu_place(places[i])) {
+#ifdef PADDLE_WITH_CUDA
+      device_contexts_[i] = new platform::CUDADeviceContext(
+          boost::get<platform::GPUPlace>(places[i]));
+#else
+      PADDLE_THROW("'GPUPlace' is not supported in CPU only device.");
+#endif
+    }
+  }
+}
+
+Executor::~Executor() {
+  for (auto& device_context : device_contexts_) {
+    delete device_context;
+  }
+}
+
+void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id) {
+  // TODO(tonyyang-svail):
+  //   - only runs on the first device (i.e. no interdevice communication)
+  //   - will change to use multiple blocks for RNN op and Cond Op
+  PADDLE_ENFORCE_GT(pdesc.blocks_size(), block_id);
+  auto& block = pdesc.blocks(block_id);
+  auto& device = device_contexts_[0];
+
+  // Instantiate all the vars in the global scope
+  for (auto& var : block.vars()) {
+    scope->NewVar(var.name());
+  }
+
+  Scope& local_scope = scope->NewScope();
+
+  std::vector<bool> should_run = Prune(pdesc, block_id);
+  PADDLE_ENFORCE_EQ(should_run.size(), static_cast<size_t>(block.ops_size()));
+  for (size_t i = 0; i < should_run.size(); ++i) {
+    if (should_run[i]) {
+      for (auto& var : block.ops(i).outputs()) {
+        for (auto& argu : var.arguments()) {
+          if (local_scope.FindVar(argu) == nullptr) {
+            local_scope.NewVar(argu);
+          }
+        }
+      }
+      auto op = paddle::framework::OpRegistry::CreateOp(block.ops(i));
+      op->Run(local_scope, *device);
+    }
+  }
+
+  // TODO(tonyyang-svail):
+  //   - Destroy local_scope
+}
+
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/framework/executor.h b/paddle/framework/executor.h
index 7fac4f4f46..4e3bc2c0a5 100644
--- a/paddle/framework/executor.h
+++ b/paddle/framework/executor.h
@@ -36,21 +36,20 @@ class Executor {
    */
   void Run(const ProgramDesc&, Scope*, int);
 
- protected:
-  /* @Brief
-   * Pruning the graph
-   *
-   * @param
-   *  ProgramDesc
-   *
-   * @return
-   *  vector<bool> Same size as ops. Indicates whether an op should be run.
-   */
-  std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id);
-
  private:
   std::vector<platform::DeviceContext*> device_contexts_;
 };
 
+/* @Brief
+ * Pruning the graph
+ *
+ * @param
+ *  ProgramDesc
+ *
+ * @return
+ *  vector<bool> Same size as ops. Indicates whether an op should be run.
+ */
+std::vector<bool> Prune(const ProgramDesc& pdesc, int block_id);
+
 }  // namespace framework
 }  // namespace paddle
--
GitLab
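
Not part of the patch: a minimal usage sketch of what the move enables. With
Prune declared as a free function in paddle::framework, callers can compute
the should-run mask for a block without constructing an Executor (and thus
without allocating any device contexts). The helper name CountRunnableOps is
hypothetical and exists only to illustrate the call.

#include <cstddef>
#include <vector>

#include "paddle/framework/executor.h"

namespace paddle {
namespace framework {

// Hypothetical helper: count how many ops in block `block_id` of `pdesc`
// would actually execute, i.e. are kept by Prune because a fetch op
// depends on them.
size_t CountRunnableOps(const ProgramDesc& pdesc, int block_id) {
  // Prune no longer needs an Executor instance, so no device contexts
  // are created just to inspect the program.
  std::vector<bool> should_run = Prune(pdesc, block_id);
  size_t count = 0;
  for (bool run : should_run) {
    if (run) ++count;
  }
  return count;
}

}  // namespace framework
}  // namespace paddle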