From 084cdd1f4f78eac9fcae4759575e172d87e81598 Mon Sep 17 00:00:00 2001
From: Yu Yang
Date: Wed, 28 Mar 2018 15:23:39 +0800
Subject: [PATCH] Rename code

---
 paddle/fluid/framework/details/computation_op_handle.cc  | 4 ++--
 paddle/fluid/framework/details/fetch_op_handle.cc        | 4 ++--
 .../framework/details/multi_devices_graph_builder.cc     | 2 +-
 .../fluid/framework/details/nccl_all_reduce_op_handle.cc | 4 ++--
 paddle/fluid/framework/details/op_handle_base.cc         | 8 ++++----
 paddle/fluid/framework/details/op_handle_base.h          | 2 +-
 .../fluid/framework/details/scale_loss_grad_op_handle.cc | 4 ++--
 .../framework/details/threaded_ssa_graph_executor.cc     | 2 +-
 8 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc
index 53ab8eb7754..7a1b40c0b60 100644
--- a/paddle/fluid/framework/details/computation_op_handle.cc
+++ b/paddle/fluid/framework/details/computation_op_handle.cc
@@ -24,10 +24,10 @@ ComputationOpHandle::ComputationOpHandle(const OpDesc &op_desc, Scope *scope,
       place_(place) {}
 
 void ComputationOpHandle::RunImpl() {
-  auto *cur_ctx = dev_ctx_[place_];
+  auto *cur_ctx = dev_ctxes_[place_];
   for (auto *in : inputs_) {
     bool need_wait =
-        in->generated_op_ && in->generated_op_->dev_ctx_[place_] != cur_ctx;
+        in->generated_op_ && in->generated_op_->dev_ctxes_[place_] != cur_ctx;
     if (need_wait) {
       in->generated_op_->Wait(cur_ctx);
     }
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index 4fc05b32489..9180903b864 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -60,8 +60,8 @@ void FetchOpHandle::RunImpl() {
     auto &t = scope->FindVar(var_name)->Get<framework::LoDTensor>();
     if (platform::is_gpu_place(var->place_)) {
 #ifdef PADDLE_WITH_CUDA
-      TensorCopy(t, cpu, *dev_ctx_[t.place()], &tensors_[i]);
-      dev_ctx_[t.place()]->Wait();
+      TensorCopy(t, cpu, *dev_ctxes_[t.place()], &tensors_[i]);
+      dev_ctxes_[t.place()]->Wait();
 #endif
     } else {
       tensors_[i].ShareDataWith(t);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
index 67987760764..a1b913a863c 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -74,7 +74,7 @@ std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
       result.ops_.emplace_back(new ComputationOpHandle(*op, s, p));
       auto *op_handle = result.ops_.back().get();
 
-      op_handle->dev_ctx_[p] = const_cast<platform::DeviceContext *>(
+      op_handle->dev_ctxes_[p] = const_cast<platform::DeviceContext *>(
          platform::DeviceContextPool::Instance().Get(p));
 
       auto var_names = op->InputArgumentNames();
diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc
index f77a4b55a17..5ddf331cfca 100644
--- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc
@@ -23,7 +23,7 @@ NCCLAllReduceOpHandle::NCCLAllReduceOpHandle(
     const platform::NCCLContextMap &ctxs)
     : local_scopes_(local_scopes), places_(places), nccl_ctxs_(ctxs) {
   for (auto &p : places_) {
-    this->dev_ctx_[p] = nccl_ctxs_.DevCtx(p);
+    this->dev_ctxes_[p] = nccl_ctxs_.DevCtx(p);
   }
 }
 
@@ -34,7 +34,7 @@ void NCCLAllReduceOpHandle::RunImpl() {
   // Wait input done
   for (auto *in : inputs_) {
     auto &p = static_cast<VarHandle *>(in)->place_;
-    in->generated_op_->Wait(dev_ctx_[p]);
+    in->generated_op_->Wait(dev_ctxes_[p]);
   }
 
   auto &var_name = static_cast<VarHandle *>(this->inputs_[0])->name_;
diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc
index 63affb70542..e4194a7442f 100644
--- a/paddle/fluid/framework/details/op_handle_base.cc
+++ b/paddle/fluid/framework/details/op_handle_base.cc
@@ -42,7 +42,7 @@ OpHandleBase::~OpHandleBase() {
 void OpHandleBase::Run(bool use_event) {
 #ifdef PADDLE_WITH_CUDA
   if (events_.empty() && use_event) {
-    for (auto &p : dev_ctx_) {
+    for (auto &p : dev_ctxes_) {
       int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
       PADDLE_ENFORCE(cudaSetDevice(dev_id));
       PADDLE_ENFORCE(
@@ -57,7 +57,7 @@ void OpHandleBase::Run(bool use_event) {
 
 #ifdef PADDLE_WITH_CUDA
   if (use_event) {
-    for (auto &p : dev_ctx_) {
+    for (auto &p : dev_ctxes_) {
       int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
       auto stream =
           static_cast<platform::CUDADeviceContext *>(p.second)->stream();
@@ -70,7 +70,7 @@ void OpHandleBase::Run(bool use_event) {
 void OpHandleBase::Wait(platform::DeviceContext *waited_dev) {
 #ifdef PADDLE_WITH_CUDA
   if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) {
-    for (auto &dev_ctx : dev_ctx_) {
+    for (auto &dev_ctx : dev_ctxes_) {
       dev_ctx.second->Wait();
     }
   } else {
@@ -81,7 +81,7 @@ void OpHandleBase::Wait(platform::DeviceContext *waited_dev) {
     }
   }
 #else
-  for (auto &dev_ctx : dev_ctx_) {
+  for (auto &dev_ctx : dev_ctxes_) {
     dev_ctx.second->Wait();
   }
 #endif
diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h
index 78f566c0356..71672fd24c6 100644
--- a/paddle/fluid/framework/details/op_handle_base.h
+++ b/paddle/fluid/framework/details/op_handle_base.h
@@ -31,7 +31,7 @@ class OpHandleBase {
   std::vector<VarHandleBase *> outputs_;
   std::unordered_map<platform::Place, platform::DeviceContext *,
                      platform::PlaceHash>
-      dev_ctx_;
+      dev_ctxes_;
 
 #ifdef PADDLE_WITH_CUDA
   std::unordered_map<int, cudaEvent_t> events_;
diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
index a6a67c9b145..0a6f6129b81 100644
--- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
+++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc
@@ -21,7 +21,7 @@ ScaleLossGradOpHandle::ScaleLossGradOpHandle(size_t num_dev, Scope *scope,
                                              platform::Place place,
                                              platform::DeviceContext *dev_ctx)
     : coeff_(static_cast<float>(1.0 / num_dev)), scope_(scope), place_(place) {
-  dev_ctx_[place_] = dev_ctx;
+  dev_ctxes_[place_] = dev_ctx;
 }
 
 ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {}
@@ -38,7 +38,7 @@ void ScaleLossGradOpHandle::RunImpl() {
   } else {
 #ifdef PADDLE_WITH_CUDA
     auto stream =
-        static_cast<platform::CUDADeviceContext *>(this->dev_ctx_[place_])
+        static_cast<platform::CUDADeviceContext *>(this->dev_ctxes_[place_])
             ->stream();
     memory::Copy(boost::get<platform::CUDAPlace>(place_), tmp,
                  platform::CPUPlace(), &coeff_, sizeof(float), stream);
diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
index fc840315562..105e21cab60 100644
--- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
@@ -96,7 +96,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run(
 
     // FIXME: Use new device context
     for (auto &p : places_) {
-      op->dev_ctx_[p] = fetch_ctxs_.Get(p);
+      op->dev_ctxes_[p] = fetch_ctxs_.Get(p);
     }
 
     for (auto *var : vars) {
-- 
GitLab