diff --git a/paddle/fluid/framework/details/execution_strategy.h b/paddle/fluid/framework/details/execution_strategy.h
index e7aa74742f827efabff1189d3213edd748d9082d..716d674fa29bad9321fc20979775c06f26bf4679 100644
--- a/paddle/fluid/framework/details/execution_strategy.h
+++ b/paddle/fluid/framework/details/execution_strategy.h
@@ -20,7 +20,7 @@ namespace details {
 
 struct ExecutionStrategy {
   size_t num_threads_{0};
-  bool use_event_{true};
+  bool use_cuda_{true};
   bool allow_op_delay_{false};
   size_t num_iteration_per_drop_scope_{100};
 };
diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc
index 3849cca59a3347137b769f97261cfbf97da8d6ff..f79565fe71c4aef140475c922cbbf5a1e0b7fe03 100644
--- a/paddle/fluid/framework/details/op_handle_base.cc
+++ b/paddle/fluid/framework/details/op_handle_base.cc
@@ -39,9 +39,9 @@ OpHandleBase::~OpHandleBase() {
 #endif
 }
 
-void OpHandleBase::Run(bool use_event) {
+void OpHandleBase::Run(bool use_cuda) {
 #ifdef PADDLE_WITH_CUDA
-  if (events_.empty() && use_event) {
+  if (events_.empty() && use_cuda) {
     for (auto &p : dev_ctxes_) {
       int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
       PADDLE_ENFORCE(cudaSetDevice(dev_id));
@@ -50,7 +50,7 @@ void OpHandleBase::Run(bool use_event) {
     }
   }
 #else
-  PADDLE_ENFORCE(!use_event);
+  PADDLE_ENFORCE(!use_cuda);
 #endif
 
   RunImpl();
diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h
index dc92b0fe9f760d95d4869fdd56c0400b6710437f..fbd90a3296bca92b097cab925b218b91e7f4752f 100644
--- a/paddle/fluid/framework/details/op_handle_base.h
+++ b/paddle/fluid/framework/details/op_handle_base.h
@@ -36,7 +36,7 @@ class OpHandleBase {
 
   virtual std::string Name() const = 0;
 
-  void Run(bool use_event);
+  void Run(bool use_cuda);
 
   virtual void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx);
 
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
index 496fadd04dac982b87b9d9e14f599ed37d9709d0..23302ffe4c784d91ebd76e17d4d00a1c4af321f0 100644
--- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
@@ -192,7 +192,7 @@ void ThreadedSSAGraphExecutor::RunOp(
       if (VLOG_IS_ON(10)) {
         VLOG(10) << op << " " << op->Name() << " : " << op->DebugString();
       }
-      op->Run(strategy_.use_event_);
+      op->Run(strategy_.use_cuda_);
       VLOG(10) << op << " " << op->Name() << " Done ";
       running_ops_--;
       ready_var_q->Extend(op->Outputs());
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index 38f1e756bf45448b8ea0c26e068d334f31b2c870..cb30f43a1d6b56ceb5cfc05a39b8132b19512bcf 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -61,7 +61,7 @@ ParallelExecutor::ParallelExecutor(
     size_t num_trainers, size_t trainer_id)
     : member_(new ParallelExecutorPrivate(places)) {
   member_->global_scope_ = scope;
-  member_->use_cuda_ = exec_strategy.use_event_;
+  member_->use_cuda_ = exec_strategy.use_cuda_;
 
   // Step 1. Bcast the params to devs.
   // Create local scopes
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index 669d1bdaa3ec194be817cdc5e1f8484770c70c68..c88fbef63cf26c671246b15ea9872da0e7a92c1a 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -509,10 +509,10 @@ All parameter, weight, gradient are variables in Paddle.
             self.num_threads_ = num_threads;
           })
       .def_property(
-          "use_event",
-          [](const ExecutionStrategy &self) { return self.use_event_; },
-          [](ExecutionStrategy &self, bool use_event) {
-            self.use_event_ = use_event;
+          "use_cuda",
+          [](const ExecutionStrategy &self) { return self.use_cuda_; },
+          [](ExecutionStrategy &self, bool use_cuda) {
+            self.use_cuda_ = use_cuda;
           })
       .def_property(
           "allow_op_delay",
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 50fc085d672a4541b0d52ed47ef1ba82268e92b1..0fdc9a035292b3390cece6c5821a60b1b281e54d 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -113,7 +113,7 @@ class ParallelExecutor(object):
 
         if exec_strategy is None:
             exec_strategy = ExecutionStrategy()
-        exec_strategy.use_event = use_cuda
+        exec_strategy.use_cuda = use_cuda
 
         if exec_strategy.num_threads == 0:
             if use_cuda:
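
Usage sketch (not part of the diff): a minimal illustration of how the renamed flag would be set from the Python side after this change. Only `ExecutionStrategy.use_cuda` comes from the patch above; the toy network, place selection, and strategy values are illustrative assumptions.

import paddle.fluid as fluid

# Build a trivial regression network so ParallelExecutor has a loss to drive.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.mean(fluid.layers.square_error_cost(input=y_pred, label=y))

use_cuda = fluid.core.is_compiled_with_cuda()
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
fluid.Executor(place).run(fluid.default_startup_program())

# ExecutionStrategy now exposes `use_cuda` (formerly `use_event`); when no
# strategy is passed, ParallelExecutor fills it from its own `use_cuda`
# argument, as in the parallel_executor.py hunk above.
exec_strategy = fluid.ExecutionStrategy()
exec_strategy.use_cuda = use_cuda
exec_strategy.num_threads = 1  # 0 lets ParallelExecutor choose a default

pe = fluid.ParallelExecutor(use_cuda=use_cuda,
                            loss_name=loss.name,
                            exec_strategy=exec_strategy)

On the C++ side the same flag reaches OpHandleBase::Run(), which only creates CUDA events when it is true, so a CPU-only build can assert `!use_cuda` instead of the old, misleadingly named `!use_event`.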