From 952508527a94de94a78c9250545dcd2d37d21ff4 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Fri, 22 Nov 2019 10:05:29 +0800
Subject: [PATCH] Polish some PE code details (#21274)

* polish code details, test=develop

* further polish hint msg, test=develop
---
 .../ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc   | 2 +-
 paddle/fluid/framework/parallel_executor.cc                | 7 +++----
 ...ared_memory_reuse_pass_and_fuse_optimization_op_pass.py | 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc
index d637269f86..415d40a202 100644
--- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc
@@ -55,7 +55,7 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const {
   VLOG(6) << "Find " << fuse_op_type << " operators : " << opt_ops_num
           << ", and " << opt_nodes.size() << " for dense gradients.";
-  if (opt_nodes.size() == 0) return;
+  if (opt_nodes.size() <= 1) return;
   if (result.Has(details::kFusedOptType)) {
     auto &opt_type = result.Get(details::kFusedOptType);
     VLOG(6) << "Currently only support fusing one type of optimizer op, "
diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc
index a71e4ee5cb..91bc26b1fd 100644
--- a/paddle/fluid/framework/parallel_executor.cc
+++ b/paddle/fluid/framework/parallel_executor.cc
@@ -421,10 +421,9 @@ ParallelExecutor::ParallelExecutor(const std::vector &places,
 #endif
   LOG(INFO) << string::Sprintf(
-      "The number of %s, which is used in ParallelExecutor, is %lu. And "
-      "the Program will be copied %lu copies",
-      (member_->use_cuda_ ? "CUDAPlace" : "CPUPlace"), places.size(),
-      places.size());
+      "The Program will be executed on %s using ParallelExecutor, %lu "
+      "cards are used, so %lu programs are executed in parallel.",
+      (member_->use_cuda_ ? "CUDA" : "CPU"), places.size(), places.size());

   // Step 1. Bcast the bcast_vars to devs.
   // Create local scopes
diff --git a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py
index fe0f67635f..0b14cab4a7 100644
--- a/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py
+++ b/python/paddle/fluid/tests/unittests/test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass.py
@@ -28,7 +28,7 @@ class CUDAInplaceTestWithFuseOptimizationOps(InplaceTestBase):
         self.check_single_card_fetch_var()


-class CPUAInplaceTestWithFuseOptimizationOps(InplaceTestBase):
+class CPUInplaceTestWithFuseOptimizationOps(InplaceTestBase):
     def initParameter(self):
         self.use_cuda = False
         self.fuse_all_optimizer_ops = True
--
GitLab