From fd3aad6cb3f398b3ce9eeca28adb513ffeb9acc4 Mon Sep 17 00:00:00 2001 From: chengduo Date: Tue, 23 Jul 2019 11:39:18 +0800 Subject: [PATCH] Make fuse_optimizer_op_pass also work when the model contains sparse gradients. (#18664) * support sparse gradients test=develop --- paddle/fluid/framework/details/CMakeLists.txt | 2 +- .../fluid/framework/details/build_strategy.cc | 51 +-- .../details/fused_all_reduce_op_handle.cc | 20 +- .../framework/details/multi_devices_helper.h | 8 +- .../scope_buffered_ssa_graph_executor.cc | 25 ++ paddle/fluid/framework/ir/CMakeLists.txt | 2 +- ...d_pass.cc => coalesce_grad_tensor_pass.cc} | 309 +++++++++--------- ...rad_pass.h => coalesce_grad_tensor_pass.h} | 0 .../fuse_optimizer_op_pass.cc | 302 +++++++++-------- .../fuse_optimizer_op_pass.h | 29 +- paddle/fluid/framework/ir/graph_helper.cc | 2 - paddle/fluid/framework/ir/graph_helper.h | 4 + .../fuse_all_reduce_op_pass.cc | 6 +- paddle/fluid/framework/ir/node.h | 2 +- paddle/fluid/framework/ir/pass.cc | 4 - paddle/fluid/operators/CMakeLists.txt | 1 + ...uous_space_op.cc => coalesce_tensor_op.cc} | 63 ++-- paddle/fluid/platform/CMakeLists.txt | 8 +- .../fluid/platform/device_memory_aligment.cc | 34 ++ .../fluid/platform/device_memory_aligment.h | 27 ++ paddle/fluid/pybind/pybind.cc | 2 +- .../unittests/parallel_executor_test_base.py | 6 + .../fluid/tests/unittests/simple_nets.py | 28 ++ ...space_op.py => test_coalesce_tensor_op.py} | 9 +- .../unittests/test_fuse_all_reduce_pass.py | 115 +++++-- .../unittests/test_fuse_optimizer_pass.py | 108 ++++-- 26 files changed, 693 insertions(+), 474 deletions(-) rename paddle/fluid/framework/ir/{alloc_continuous_space_for_grad_pass.cc => coalesce_grad_tensor_pass.cc} (60%) rename paddle/fluid/framework/ir/{alloc_continuous_space_for_grad_pass.h => coalesce_grad_tensor_pass.h} (100%) rename paddle/fluid/operators/{alloc_continuous_space_op.cc => coalesce_tensor_op.cc} (78%) create mode 100644 paddle/fluid/platform/device_memory_aligment.cc create mode 100644 paddle/fluid/platform/device_memory_aligment.h rename python/paddle/fluid/tests/unittests/{test_alloc_continuous_space_op.py => test_coalesce_tensor_op.py} (91%) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index f1a54e914f..c566f0a5a3 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -95,5 +95,5 @@ cc_library(build_strategy SRCS build_strategy.cc DEPS fuse_elewise_add_act_pass multi_batch_merge_pass fuse_relu_depthwise_conv_pass memory_optimize_pass lock_free_optimize_pass - alloc_continuous_space_for_grad_pass fuse_all_reduce_op_pass backward_optimizer_op_deps_pass + coalesce_grad_tensor_pass fuse_all_reduce_op_pass backward_optimizer_op_deps_pass fuse_adam_op_pass fuse_sgd_op_pass fuse_momentum_op_pass record_skip_memory_opt_vars_pass) diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index 5871d63f2e..326a463108 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -76,6 +76,7 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { PADDLE_ENFORCE(!FLAGS_use_mkldnn, "Please compile with MKLDNN first to use MKLDNN"); #endif + if (strategy_.enable_sequential_execution_) { VLOG(1) << "Add sequential_execution_pass"; AppendPass("sequential_execution_pass"); @@ -108,30 +109,34 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { } // for single 
card training, fuse_all_reduce_ops is unnecessary. - // alloc_continuous_space_for_grad_pass should be before of MultiDevPass. + // coalesce_grad_tensor_pass should be before of MultiDevPass. if (strategy_.fuse_all_reduce_ops_) { - VLOG(1) << "Add alloc_continuous_space_for_grad_pass"; - AppendPass("alloc_continuous_space_for_grad_pass"); + VLOG(1) << "Add coalesce_grad_tensor_pass"; + AppendPass("coalesce_grad_tensor_pass"); } + // Fuse all the optimization operators. + if (strategy_.is_distribution_) { + VLOG(3) << "Currently, fuse_all_optimizer_ops only works under " + "Non-distributed mode."; + strategy_.fuse_all_optimizer_ops_ = false; + } + if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce || + strategy_.is_distribution_) { + VLOG(3) << "Currently, fuse_all_optimizer_ops only works under AllReduce " + "mode."; + strategy_.fuse_all_optimizer_ops_ = false; + } if (strategy_.fuse_all_optimizer_ops_) { - if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce || - strategy_.is_distribution_) { - VLOG(3) - << "Currently, fuse_all_optimizer_ops only works under AllReduce " - "mode."; - strategy_.fuse_all_optimizer_ops_ = false; - } else { - // NOTE: fuse_all_xx_ops will count the number of xx operator first, - // if the number is zero, fuse_all_reduce_ops will do nothing. - // Currently, only one type of optimization algorithm can be fused. - VLOG(1) << "Add fuse_adam_op_pass"; - AppendPass("fuse_adam_op_pass"); - VLOG(1) << "Add fuse_sgd_op_pass"; - AppendPass("fuse_sgd_op_pass"); - VLOG(1) << "Add fuse_momentum_op_pass"; - AppendPass("fuse_momentum_op_pass"); - } + // NOTE: fuse_all_xx_ops will count the number of xx operator first, + // if the number is zero, fuse_all_reduce_ops will do nothing. + // Currently, only one type of optimization algorithm can be fused. + VLOG(1) << "Add fuse_adam_op_pass"; + AppendPass("fuse_adam_op_pass"); + VLOG(1) << "Add fuse_sgd_op_pass"; + AppendPass("fuse_sgd_op_pass"); + VLOG(1) << "Add fuse_momentum_op_pass"; + AppendPass("fuse_momentum_op_pass"); } // Add a graph viz pass to record a graph. 
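As a rough usage sketch (not part of this patch), both fusion paths appended above are switched on from Python through fluid.BuildStrategy; the flag names below are the public counterparts of strategy_.fuse_all_reduce_ops_ and strategy_.fuse_all_optimizer_ops_, and the loss variable is assumed to come from an already-built main program:

import paddle.fluid as fluid

build_strategy = fluid.BuildStrategy()
# coalesce_grad_tensor_pass + fuse_all_reduce_op_pass path
build_strategy.fuse_all_reduce_ops = True
# fuse_adam/sgd/momentum_op_pass path; only effective in non-distributed AllReduce mode
build_strategy.fuse_all_optimizer_ops = True

compiled_prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(
    loss_name=loss.name, build_strategy=build_strategy)  # `loss` assumed defined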
@@ -301,7 +306,7 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph, pass->Erase(kNCCLCtxs); pass->SetNotOwned(kNCCLCtxs, nctx); #endif - } else if (pass->Type() == "alloc_continuous_space_for_grad_pass" || + } else if (pass->Type() == "coalesce_grad_tensor_pass" || pass->Type() == "fuse_adam_op_pass" || pass->Type() == "fuse_sgd_op_pass" || pass->Type() == "fuse_momentum_op_pass" || @@ -321,7 +326,7 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph, new bool(use_hierarchical_allreduce_)); #endif } - } else if (pass->Type() == "alloc_continuous_space_for_grad_pass") { + } else if (pass->Type() == "coalesce_grad_tensor_pass") { pass->Erase(kPlaces); pass->SetNotOwned>(kPlaces, &places); pass->Erase(kLocalScopes); @@ -389,7 +394,7 @@ USE_PASS(backward_optimizer_op_deps_pass); USE_PASS(modify_op_lock_and_record_event_pass); USE_PASS(inplace_pass); USE_PASS(lock_free_optimize_pass); -USE_PASS(alloc_continuous_space_for_grad_pass); +USE_PASS(coalesce_grad_tensor_pass); USE_PASS(graph_to_program_pass); USE_PASS(fuse_adam_op_pass); USE_PASS(fuse_sgd_op_pass); diff --git a/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc b/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc index 8066ca7813..23f0b4396b 100644 --- a/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/fused_all_reduce_op_handle.cc @@ -17,6 +17,7 @@ #include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/reduce_and_gather.h" #include "paddle/fluid/framework/details/variable_visitor.h" +#include "paddle/fluid/platform/device_memory_aligment.h" #include "paddle/fluid/platform/profiler.h" DEFINE_bool(skip_fused_all_reduce_check, false, ""); @@ -24,19 +25,6 @@ namespace paddle { namespace framework { namespace details { -// Note(zcd): Addresses should be aligned, otherwise, the results may have -// diff. -static size_t Alignment(size_t size, const platform::Place &place) { - // Allow to allocate the minimum chunk size is 4 KB. - size_t alignment = 1 << 12; - if (platform::is_gpu_place(place)) { - // Allow to allocate the minimum chunk size is 256 B. - alignment = 1 << 8; - } - size_t remaining = size % alignment; - return remaining == 0 ? 
size : size + (alignment - remaining); -} - typedef std::vector>> GradientAndLoDTensor; @@ -121,7 +109,7 @@ void FusedAllReduceOpHandle::RunImpl() { for (size_t k = 1; k < g_tensor.size(); ++k) { const void *cur_address = g_tensor.at(k - 1).second->data(); int64_t len = g_tensor.at(k - 1).second->numel(); - auto offset = Alignment(len * size_of_dtype, places_[0]); + auto offset = platform::Alignment(len * size_of_dtype, places_[0]); void *infer_next_address = reinterpret_cast( reinterpret_cast(cur_address) + offset); const void *next_address = g_tensor.at(k).second->data(); @@ -241,8 +229,8 @@ void FusedAllReduceOpHandle::GetDTypeAndNumel( // Get element number int64_t len = grad_tensor.at(i).second->numel(); PADDLE_ENFORCE_GT(len, 0); - // Alignment(len) - *numel += Alignment(len * size_of_dtype, places_[0]) / size_of_dtype; + *numel += + platform::Alignment(len * size_of_dtype, places_[0]) / size_of_dtype; } } diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h index 1cc6760fc5..8cd419540f 100644 --- a/paddle/fluid/framework/details/multi_devices_helper.h +++ b/paddle/fluid/framework/details/multi_devices_helper.h @@ -62,11 +62,15 @@ typedef std::vector FusedGrads; constexpr char kFusedGrads[] = "fused_gradients"; typedef std::vector> ParamsAndGrads; -constexpr char kParamsAndGrads[] = "params_grads"; +constexpr char kParamsAndDenseGrads[] = "params_and_dense_grads"; +constexpr char kParamsAndSparseGrads[] = "params_and_sparse_grads"; + +typedef std::vector ProgramDescs; +constexpr char kProgramDescs[] = "program_descs"; typedef std::vector>> GroupParamsAndGrads; -constexpr char kGroupParamsAndGrads[] = "group_params_grads"; +constexpr char kGroupParamsAndDenseGrads[] = "group_params_dense_grads"; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc index 070d59517b..8459f3a47b 100644 --- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc @@ -17,6 +17,8 @@ #include #include #include +#include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/variable_helper.h" #include "paddle/fluid/platform/profiler.h" @@ -70,6 +72,29 @@ void ScopeBufferedSSAGraphExecutor::InitVariables() { InitializeVariable(pair.first, pair.second); } } + + const ir::Graph &graph = Graph(); + if (graph.Has(details::kProgramDescs)) { + auto &program_descs = + graph.Get(details::kProgramDescs); + // Init vars + auto &fused_grad_vars = graph.Get(details::kFusedVars); + for (size_t i = 0; i < local_exec_scopes_.size(); ++i) { + for (auto &var_name : fused_grad_vars) { + auto var = local_exec_scopes_[i]->Var(var_name); + var->GetMutable(); + } + } + + for (auto &program_desc : program_descs) { + for (auto &op_desc : program_desc.Block(0).AllOps()) { + for (size_t i = 0; i < local_exec_scopes_.size(); ++i) { + auto op = OpRegistry::CreateOp(*op_desc); + op->Run(*local_exec_scopes_[i], places_[i]); + } + } + } + } } void ScopeBufferedSSAGraphExecutor::DropLocalExeScopes() { diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 5228840c96..0e12e35625 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -45,7 +45,7 @@ 
cc_library(graph_traits SRCS graph_traits.cc DEPS graph) cc_library(graph_pattern_detector SRCS graph_pattern_detector.cc DEPS graph graph_helper graph_traits) cc_library(fuse_pass_base SRCS fuse_pass_base.cc DEPS pass) -cc_library(alloc_continuous_space_for_grad_pass SRCS alloc_continuous_space_for_grad_pass.cc DEPS graph graph_helper) +cc_library(coalesce_grad_tensor_pass SRCS coalesce_grad_tensor_pass.cc DEPS graph graph_helper) pass_library(graph_to_program_pass base) pass_library(graph_viz_pass base) diff --git a/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.cc b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc similarity index 60% rename from paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.cc rename to paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc index 001da5686f..3eb4ef9fb3 100644 --- a/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.cc +++ b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h" +#include "paddle/fluid/framework/ir/coalesce_grad_tensor_pass.h" #include #include #include @@ -53,107 +53,109 @@ static constexpr double kMB = 1048576.0; void SetFuseParameterGroupsSize(int group_size) { FLAGS_fuse_parameter_groups_size = group_size; } + int GetFuseParameterGroupsSize() { return FLAGS_fuse_parameter_groups_size; } void SetFuseParameterMemorySize(double memory_size) { FLAGS_fuse_parameter_memory_size = memory_size; } + double GetFuseParameterMemorySize() { return FLAGS_fuse_parameter_memory_size; } -class AllocContinuousSpaceForGradPass : public ir::Pass { +class CoalesceGradTensorPass : public ir::Pass { protected: void ApplyImpl(ir::Graph *graph) const { ir::Graph &result = *graph; - auto &places = Get>(details::kPlaces); - auto &local_scopes = Get>(details::kLocalScopes); - - ResetAttribute(details::kParamsAndGrads, &result); - ResetAttribute(details::kGroupParamsAndGrads, - &result); - - auto ¶ms_grads = - result.Get(details::kParamsAndGrads); + details::ParamsAndGrads params_grads; RecordParamsAndGrads(result, ¶ms_grads); - auto num_params_grads = params_grads.size(); - VLOG(10) << "The number of params and grads is:" << num_params_grads; - if (num_params_grads == 0) { + VLOG(10) << "The number of params and grads is:" << params_grads.size(); + if (params_grads.size() == 0) { return; } - std::unordered_map var_name2node; - std::unordered_map> - var_name2node_set; - for (ir::Node *node : result.Nodes()) { - if (node->IsVar() && node->Var()) { - // Note: The graph may have the same name node. 
For example, parameter - // is the input of operator and it also is the output of optimizer; - var_name2node.emplace(node->Var()->Name(), node); - var_name2node_set[node->Var()->Name()].emplace(node); + auto vars_info = GetVarInfo(result); + ResetAttribute(details::kParamsAndDenseGrads, + &result); + ResetAttribute(details::kParamsAndSparseGrads, + &result); + ResetAttribute( + details::kGroupParamsAndDenseGrads, &result); + auto &p_g_dense_grad = + result.Get(details::kParamsAndDenseGrads); + auto &p_g_sparse_grad = + result.Get(details::kParamsAndSparseGrads); + + for (auto ¶m_grad : params_grads) { + if (IsLoDTensorType(GetTypeOfVar(vars_info, param_grad.second))) { + p_g_dense_grad.emplace_back(param_grad); + } else { + p_g_sparse_grad.emplace_back(param_grad); } } - auto &group_params_grads = - result.Get(details::kGroupParamsAndGrads); - // Note: the order of params_grads may be changed by SetGroupParamsAndGrads. - SetGroupParamsAndGrads(var_name2node, params_grads, &group_params_grads); + VLOG(10) << "Dense grads: " << p_g_dense_grad.size() + << ", Sparse grads: " << p_g_sparse_grad.size(); + if (p_g_dense_grad.size() == 0) { + return; + } + + auto num_of_p_g_dense_grad = p_g_dense_grad.size(); + auto &group_params_grads = result.Get( + details::kGroupParamsAndDenseGrads); + // Note: the order of p_g_dense_grad may be changed by + // SetGroupParamsAndGrads. + SetGroupParamsAndGrads(vars_info, p_g_dense_grad, &group_params_grads); - params_grads.clear(); - params_grads.reserve(num_params_grads); + p_g_dense_grad.clear(); + p_g_dense_grad.reserve(num_of_p_g_dense_grad); for (auto &group_p_g : group_params_grads) { - params_grads.insert(params_grads.end(), group_p_g.begin(), - group_p_g.end()); + p_g_dense_grad.insert(p_g_dense_grad.end(), group_p_g.begin(), + group_p_g.end()); } PADDLE_ENFORCE_EQ( - num_params_grads, params_grads.size(), - "The number of params_grads is not consistent with before."); + p_g_dense_grad.size(), num_of_p_g_dense_grad, + "The number of p_g_dense_grad is not consistent with before."); - if (IsUnifiedDtype(params_grads, var_name2node)) { - SetGradientPersistable(params_grads, var_name2node, var_name2node_set); - AllocContinuousAddressSpace(places, local_scopes, var_name2node, - params_grads, &result); + if (IsUnifiedDtype(p_g_dense_grad, vars_info)) { + SetGradientPersistable(p_g_dense_grad, vars_info); + CoalesceTensors(vars_info, p_g_dense_grad, &result); } else { - // Set Gradients as Persistable to prevent this var becoming reusable. 
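To make the new dense/sparse partition above concrete, a schematic restatement with illustrative names (not the pass's actual data structures): gradients whose VarDesc type is LOD_TENSOR land in kParamsAndDenseGrads and are coalesced, while anything else, typically SELECTED_ROWS produced by a sparse embedding lookup, stays in kParamsAndSparseGrads untouched:

# Schematic only: how ApplyImpl splits params_grads before grouping and coalescing.
def split_dense_sparse(params_grads, var_type):
    dense, sparse = [], []
    for param, grad in params_grads:
        if var_type[grad] == 'LOD_TENSOR':    # dense gradient, eligible for coalescing
            dense.append((param, grad))
        else:                                  # e.g. SELECTED_ROWS from a sparse embedding
            sparse.append((param, grad))
    return dense, sparse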
for (auto &sub_param_grad : group_params_grads) { - SetGradientPersistable(params_grads, var_name2node, var_name2node_set); - PADDLE_ENFORCE(IsUnifiedDtype(sub_param_grad, var_name2node), + SetGradientPersistable(p_g_dense_grad, vars_info); + PADDLE_ENFORCE(IsUnifiedDtype(sub_param_grad, vars_info), "The data type of the same group is not consistent."); - AllocContinuousAddressSpace(places, local_scopes, var_name2node, - sub_param_grad, &result); + CoalesceTensors(vars_info, sub_param_grad, &result); } } } void SetGradientPersistable( const std::vector> &sub_param_grad, - const std::unordered_map &var_name2node, - const std::unordered_map> - &var_name2node_set) const { + const std::unordered_map> &vars_info) + const { for (auto &p_g : sub_param_grad) { - // Get gradient var - auto iter = var_name2node.find(p_g.second); - PADDLE_ENFORCE(iter != var_name2node.end(), "%s is not found.", - p_g.second); + auto iter = vars_info.find(p_g.second); + PADDLE_ENFORCE(iter != vars_info.end(), "%s is not found.", p_g.second); + PADDLE_ENFORCE(!iter->second.empty()); // Set persistable - auto same_nodes = var_name2node_set.find(p_g.second); - PADDLE_ENFORCE(same_nodes != var_name2node_set.end(), "%s is not found.", - p_g.second); - for (auto it : same_nodes->second) { + for (auto it : iter->second) { + PADDLE_ENFORCE_NOT_NULL(it->Var()); it->Var()->SetPersistable(true); } - - PADDLE_ENFORCE(IsSupportedVarType(iter->second->Var()->GetType())); + PADDLE_ENFORCE(IsLoDTensorType(GetTypeOfVar(vars_info, p_g.second))); } } bool IsUnifiedDtype( const details::ParamsAndGrads ¶ms_grads, - const std::unordered_map &var_name2node) const { - auto dtype = - this->GetDtypeOfVar(var_name2node, params_grads.front().second); + const std::unordered_map> &vars_info) + const { + if (params_grads.empty()) return true; + auto dtype = GetDtypeOfVar(vars_info, params_grads.front().second); for (auto p_g : params_grads) { - auto next_dtype = this->GetDtypeOfVar(var_name2node, p_g.second); + auto next_dtype = GetDtypeOfVar(vars_info, p_g.second); if (next_dtype != dtype) { return false; } @@ -161,10 +163,8 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { return true; } - void AllocContinuousAddressSpace( - const std::vector &places, - const std::vector &local_scopes, - const std::unordered_map &var_name2node, + void CoalesceTensors( + const std::unordered_map> &vars_info, const details::ParamsAndGrads ¶ms_grads, Graph *result) const { // Create a FusedVarsSet to avoid duplicating names for fused_var in other // pass. @@ -175,20 +175,22 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { if (!result->Has(details::kFusedGrads)) { result->Set(details::kFusedGrads, new details::FusedGrads); } - + if (!result->Has(details::kProgramDescs)) { + result->Set(details::kProgramDescs, new details::ProgramDescs); + } // the fused_var_name should be unique, so it appends // params_grads.begin()->second. 
- auto fused_var_name = std::string(details::kFusedVarNamePrefix) + "@GRAD@" + - params_grads.begin()->second; - result->Get(details::kFusedGrads) - .emplace_back(fused_var_name); + auto fused_grad_var_name = std::string(details::kFusedVarNamePrefix) + + "@GRAD@" + params_grads.begin()->second; auto &fused_var_set = result->Get(details::kFusedVars); - PADDLE_ENFORCE_EQ(fused_var_set.count(fused_var_name), 0, - "%s is duplicate in FusedVars.", fused_var_name); - fused_var_set.insert(fused_var_name); + PADDLE_ENFORCE_EQ(fused_var_set.count(fused_grad_var_name), 0, + "%s is duplicate in FusedVars.", fused_grad_var_name); + fused_var_set.insert(fused_grad_var_name); + result->Get(details::kFusedGrads) + .emplace_back(fused_grad_var_name); - InitFusedVarsAndAllocSpaceForVars(places, local_scopes, var_name2node, - fused_var_name, params_grads); + InitFusedVarsAndAllocSpaceForVars(vars_info, fused_grad_var_name, + params_grads, result); } template @@ -201,16 +203,18 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { } void SetGroupParamsAndGrads( - const std::unordered_map &var_nodes, + const std::unordered_map> &vars_info, const details::ParamsAndGrads ¶ms_grads, details::GroupParamsAndGrads *group_params_grads) const { - SetGroupAccordingToLayers(var_nodes, params_grads, group_params_grads); - SetGroupAccordingToMemorySize(var_nodes, group_params_grads); - ReGroupByDtype(var_nodes, params_grads, group_params_grads); + SetGroupAccordingToLayers(vars_info, params_grads, group_params_grads); + SetGroupAccordingToMemorySize(vars_info, group_params_grads); + if (!IsUnifiedDtype(params_grads, vars_info)) { + ReGroupByDtype(vars_info, group_params_grads); + } } void SetGroupAccordingToLayers( - const std::unordered_map &var_nodes, + const std::unordered_map> &vars_info, const details::ParamsAndGrads ¶ms_grads, details::GroupParamsAndGrads *group_params_grads) const { std::map var_idx; @@ -241,41 +245,38 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { if (VLOG_IS_ON(10)) { VLOG(10) << "SetGroupAccordingToLayers: "; - PrintGroupInfo(var_nodes, group_params_grads); + PrintGroupInfo(vars_info, group_params_grads); } } void PrintGroupInfo( - const std::unordered_map &var_nodes, + const std::unordered_map> &vars_info, details::GroupParamsAndGrads *group_params_grads) const { for (size_t i = 0; i < group_params_grads->size(); ++i) { VLOG(10) << "group " << i; std::stringstream out; size_t gps_size = 0; for (auto &p_g : group_params_grads->at(i)) { - auto iter = var_nodes.find(p_g.first); - PADDLE_ENFORCE(iter != var_nodes.end(), "%s is not found.", p_g.first); - auto shape = iter->second->Var()->GetShape(); - size_t size = framework::SizeOfType(iter->second->Var()->GetDataType()); + auto var_desc = GetVarDescFromVarsInfo(vars_info, p_g.first); + auto shape = var_desc->GetShape(); + size_t size = framework::SizeOfType(var_desc->GetDataType()); std::for_each(shape.begin(), shape.end(), [&size](const int64_t &n) { size *= n; }); gps_size += size; out << string::Sprintf("(%s(%d), %s)", p_g.first, size, p_g.second); } - auto dtype = this->GetDtypeOfVar(var_nodes, - group_params_grads->at(i).front().first); - + auto dtype = + GetDtypeOfVar(vars_info, group_params_grads->at(i).front().first); VLOG(10) << out.str() << ", group size:" << group_params_grads->at(i).size() << ", group memory size:" << static_cast(gps_size) / kMB - << "(MB)" - << ", dtype:" << dtype; + << "(MB), dtype:" << dtype; } } void SetGroupAccordingToMemorySize( - const std::unordered_map &var_nodes, + const 
std::unordered_map> &vars_info, details::GroupParamsAndGrads *group_params_grads) const { const double group_memory_size = GetFuseParameterMemorySize(); if (group_memory_size <= 0.0) { @@ -290,28 +291,19 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { size_t local_group_memory_size = 0; while (j < group_params_grads->size()) { - std::for_each( - group_params_grads->at(j).begin(), group_params_grads->at(j).end(), - [&local_group_memory_size, - &var_nodes](const std::pair &p_g) { - auto iter = var_nodes.find(p_g.second); - PADDLE_ENFORCE(iter != var_nodes.end(), "%s is not found.", - p_g.second); - - size_t size = - framework::SizeOfType(iter->second->Var()->GetDataType()); - auto shape = iter->second->Var()->GetShape(); - std::for_each(shape.begin(), shape.end(), - [&size](const int64_t &n) { size *= n; }); - - local_group_memory_size += size; - }); + for (auto &p_g_iter : group_params_grads->at(j)) { + auto var_desc = GetVarDescFromVarsInfo(vars_info, p_g_iter.second); + size_t size = framework::SizeOfType(var_desc->GetDataType()); + auto shape = var_desc->GetShape(); + std::for_each(shape.begin(), shape.end(), + [&size](const int64_t &n) { size *= n; }); + local_group_memory_size += size; + } group_p_g.insert(group_p_g.end(), group_params_grads->at(j).begin(), group_params_grads->at(j).end()); ++j; - if (j >= group_params_grads->size()) { break; } @@ -333,20 +325,15 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { if (VLOG_IS_ON(10)) { VLOG(10) << string::Sprintf( - "SetGroupAccordingToMemorySize(memory_size: %f):", group_memory_size); - PrintGroupInfo(var_nodes, group_params_grads); + "SetGroupAccordingToMemorySize(memory_size: %f MB):", + GetFuseParameterMemorySize()); + PrintGroupInfo(vars_info, group_params_grads); } } void ReGroupByDtype( - const std::unordered_map &var_nodes, - const details::ParamsAndGrads ¶ms_grads, + const std::unordered_map> &vars_info, details::GroupParamsAndGrads *group_params_grads) const { - if (IsUnifiedDtype(params_grads, var_nodes)) { - VLOG(1) << "needn't regroup fusion params_grads"; - return; - } - details::GroupParamsAndGrads new_group_params_grads; for (auto &group_p_g : *group_params_grads) { @@ -354,7 +341,7 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { details::GroupParamsAndGrads local_group_params_grads; for (auto &p_g : group_p_g) { - auto dtype = GetDtypeOfVar(var_nodes, p_g.second); + auto dtype = GetDtypeOfVar(vars_info, p_g.second); size_t idx = 0; auto var_idx_iter = type_idx.find(dtype); @@ -383,25 +370,53 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { VLOG(10) << string::Sprintf("ReGroupByDtype(memory_size: %f MB, %u):", GetFuseParameterMemorySize(), GetFuseParameterGroupsSize()); - PrintGroupInfo(var_nodes, group_params_grads); + PrintGroupInfo(vars_info, group_params_grads); } } proto::VarType::Type GetDtypeOfVar( - const std::unordered_map &var_nodes, + const std::unordered_map> &vars_info, + const std::string &name) const { + auto var_desc = GetVarDescFromVarsInfo(vars_info, name); + return var_desc->GetDataType(); + } + + proto::VarType::Type GetTypeOfVar( + const std::unordered_map> &vars_info, const std::string &name) const { - auto grad_iter = var_nodes.find(name); - PADDLE_ENFORCE(grad_iter != var_nodes.end()); - PADDLE_ENFORCE_NOT_NULL(grad_iter->second->Var()); - return grad_iter->second->Var()->GetDataType(); + auto var_desc = GetVarDescFromVarsInfo(vars_info, name); + return var_desc->GetType(); } private: - bool IsSupportedVarType(const proto::VarType::Type &type) const 
{ + bool IsLoDTensorType(const proto::VarType::Type &type) const { // Current only support LOD_TENSOR. return type == proto::VarType::LOD_TENSOR; } + std::unordered_map> GetVarInfo( + const Graph &result) const { + std::unordered_map> vars; + for (Node *node : result.Nodes()) { + if (node->IsVar() && node->Var()) { + // Note: The graph may have the same name node. For example, parameter + // is the input of operator and it also is the output of optimizer; + vars[node->Var()->Name()].emplace_back(node); + } + } + return vars; + } + + const VarDesc *GetVarDescFromVarsInfo( + const std::unordered_map> &vars_info, + const std::string &var_name) const { + auto grad_iter = vars_info.find(var_name); + PADDLE_ENFORCE(grad_iter != vars_info.end(), "%s is not found.", var_name); + PADDLE_ENFORCE(!grad_iter->second.empty()); + PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var()); + return grad_iter->second.front()->Var(); + } + void RecordParamsAndGrads(const ir::Graph &graph, details::ParamsAndGrads *params_grads) const { std::vector topo_nodes = ir::TopologySortOperations(graph); @@ -431,30 +446,9 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { } void InitFusedVarsAndAllocSpaceForVars( - const std::vector &places, - const std::vector &local_scopes, - const std::unordered_map &vars, + const std::unordered_map> &vars_info, const std::string &fused_var_name, - const details::ParamsAndGrads ¶ms_grads) const { - // Init Gradients and FusedVars - VLOG(10) << "Init FusedVars and Gradients."; - for (auto it = local_scopes.rbegin(); it != local_scopes.rend(); ++it) { - auto &scope = *it; - - PADDLE_ENFORCE(scope->FindVar(fused_var_name) == nullptr, - "%s has existed in scope.", fused_var_name); - scope->Var(fused_var_name)->GetMutable(); - - for (auto &p_g : params_grads) { - auto iter = vars.find(p_g.second); - PADDLE_ENFORCE(iter != vars.end()); - PADDLE_ENFORCE_NOT_NULL(iter->second->Var()); - PADDLE_ENFORCE_EQ(iter->second->Var()->GetType(), - proto::VarType::LOD_TENSOR); - scope->Var(p_g.second)->GetMutable(); - } - } - + const details::ParamsAndGrads ¶ms_grads, ir::Graph *result) const { // Alloc continuous space for vars. 
std::vector grads_name; std::vector params_name; @@ -464,16 +458,13 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { params_name.emplace_back(p_g.first); grads_name.emplace_back(p_g.second); } - framework::ProgramDesc program_desc; - AppendAllocSpaceForVarsOp(params_name, grads_name, fused_var_name, - program_desc.MutableBlock(0)); - for (size_t i = 0; i < local_scopes.size(); ++i) { - for (auto &op_desc : program_desc.Block(0).AllOps()) { - auto op = OpRegistry::CreateOp(*op_desc); - op->Run(*local_scopes[i], places[i]); - } - } + result->Get(details::kProgramDescs).emplace_back(); + ProgramDesc &program_desc = + result->Get(details::kProgramDescs).back(); + auto *global_block = program_desc.MutableBlock(0); + AppendAllocSpaceForVarsOp(params_name, grads_name, fused_var_name, + global_block); } void AppendAllocSpaceForVarsOp(const std::vector ¶ms_name, @@ -481,7 +472,7 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { const std::string &fused_var_name, BlockDesc *global_block) const { auto op_desc = global_block->AppendOp(); - op_desc->SetType("alloc_continuous_space"); + op_desc->SetType("coalesce_tensor"); op_desc->SetInput("Input", params_name); op_desc->SetOutput("Output", grads_name); op_desc->SetOutput("FusedOutput", {fused_var_name}); @@ -491,7 +482,7 @@ class AllocContinuousSpaceForGradPass : public ir::Pass { } // namespace framework } // namespace paddle -REGISTER_PASS(alloc_continuous_space_for_grad_pass, - paddle::framework::ir::AllocContinuousSpaceForGradPass) +REGISTER_PASS(coalesce_grad_tensor_pass, + paddle::framework::ir::CoalesceGradTensorPass) .RequirePassAttr(paddle::framework::details::kPlaces) .RequirePassAttr(paddle::framework::details::kLocalScopes); diff --git a/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.h similarity index 100% rename from paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h rename to paddle/fluid/framework/ir/coalesce_grad_tensor_pass.h diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc index 4119d39598..ee601145c0 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc @@ -25,9 +25,6 @@ namespace ir { void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { ir::Graph &result = *graph; - auto &places = Get>(details::kPlaces); - auto &local_scopes = Get>(details::kLocalScopes); - const std::string fuse_op_type = GetOpType(); std::vector aux_var_names = GetAuxiliaryVarNames(); aux_var_names.emplace_back(kParam); @@ -35,70 +32,91 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { // Step 1: Get the specified op and auxiliary variables. std::vector topo_nodes = ir::TopologySortOperations(result); - std::unordered_map> aux_var_set; - std::vector opt_ops; + auto vars_info = GetVarInfo(result); + std::vector opt_nodes; + size_t opt_ops_num = 0; + // Note: Only take care about the dense gradients. 
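For orientation, a minimal model that produces both kinds of gradients: the embedding gradient is sparse (SelectedRows) while the fc gradients are dense LoDTensors, so only the latter's optimizer ops are fused. This mirrors the bow_net helper added to simple_nets.py later in this patch, with arbitrary sizes:

import paddle.fluid as fluid

ids = fluid.layers.data(name='ids', shape=[1], dtype='int64', lod_level=1)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
# is_sparse=True makes the embedding gradient a SelectedRows (sparse) variable.
emb = fluid.layers.embedding(input=ids, size=[1000, 64], is_sparse=True)
pooled = fluid.layers.sequence_pool(input=emb, pool_type='sum')
pred = fluid.layers.fc(input=pooled, size=2, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=pred, label=label))
# The SGD ops on the fc parameters have dense gradients and can be fused.
fluid.optimizer.SGD(learning_rate=0.001).minimize(loss)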
for (auto &node : topo_nodes) { - GetSpecifiedOpsAndVars(fuse_op_type, aux_var_names, node, &opt_ops, - &aux_var_set); - } - - VLOG(6) << "Find " << fuse_op_type << " operators: " << opt_ops.size(); - if (opt_ops.size() == 0) { - return; + if (node->Op()->Type() == fuse_op_type) { + auto grad_name = node->Op()->Input(kGrad); + PADDLE_ENFORCE_EQ(grad_name.size(), static_cast(1)); + if (IsLoDTensorType(GetTypeOfVar(vars_info, grad_name[0]))) { + opt_nodes.emplace_back(node); + } + ++opt_ops_num; + } } - if (result.Has(details::kFusedOptType)) { - VLOG(6) << "Currently only support fusing one type optimizer op. Has fused " - << result.Get(details::kFusedOptType); + VLOG(6) << "Find " << fuse_op_type << " operators : " << opt_ops_num + << ", and " << opt_nodes.size() << " for dense gradients "; + if (opt_nodes.size() == 0 || result.Has(details::kFusedOptType)) { + if (result.Has(details::kFusedOptType)) { + auto &opt_type = + result.Get(details::kFusedOptType); + VLOG(6) << "Currently only support fusing one type optimizer op. " + "Has fused " + << opt_type; + } return; - } else { - result.Set(details::kFusedOptType, new details::FusedOptType); } + result.Set(details::kFusedOptType, new details::FusedOptType); result.Get(details::kFusedOptType) = fuse_op_type; + if (!result.Has(details::kProgramDescs)) { + result.Set(details::kProgramDescs, new details::ProgramDescs); + } // Step 2: Insert fused_var_name to FusedVars, and the FusedVars need be // initialized in scopes before execution. if (!result.Has(details::kFusedVars)) { result.Set(details::kFusedVars, new details::FusedVars); } + std::unordered_map> aux_var_set; + GetSpecifiedOpsAndVars(aux_var_names, opt_nodes, &aux_var_set); std::unordered_map fused_vars_name; fused_vars_name.reserve(aux_var_names.size()); auto &fused_var_set = result.Get(details::kFusedVars); const std::string prefix(details::kFusedVarNamePrefix); - // NOTE: the fused_var_name should be unique. for (auto &var_name : aux_var_names) { + // NOTE: the fused_var_name should be unique. 
auto fused_var_name = prefix + "_" + fuse_op_type + "_" + var_name + "_" + aux_var_set[var_name][0]; VLOG(6) << var_name << ": " << fused_var_name; - fused_vars_name.emplace(var_name, fused_var_name); PADDLE_ENFORCE_EQ(fused_var_set.count(fused_var_name), 0); fused_var_set.insert(fused_var_name); + fused_vars_name.emplace(var_name, fused_var_name); } // Step 3: Get the fused Gradient's name bool grad_fused = false; - if (result.Has(details::kParamsAndGrads)) { - auto ¶ms_grads = - result.Get(details::kParamsAndGrads); - PADDLE_ENFORCE_EQ( - params_grads.size(), aux_var_set.at(kGrad).size(), - "The number of gradients and optimizer ops is not equal."); - std::unordered_set opt_grad_set(aux_var_set.at(kGrad).begin(), - aux_var_set.at(kGrad).end()); - size_t same_grad_num = 0; - for (auto &p_g : params_grads) { - if (opt_grad_set.count(p_g.second)) { - ++same_grad_num; + if (result.Has(details::kParamsAndDenseGrads)) { + // NOTE: kParamsAndDenseGrads is generated by + // alloc_continue_space_for_grad_pass + auto ¶ms_and_dense_grads = + result.Get(details::kParamsAndDenseGrads); + PADDLE_ENFORCE_LE( + params_and_dense_grads.size(), aux_var_set.at(kGrad).size(), + "The number of dense gradients should be little than optimizer ops."); + std::unordered_set opt_grad_set(aux_var_set.at(kGrad).size()); + for (auto &p_g : params_and_dense_grads) { + opt_grad_set.insert(p_g.second); + } + std::vector new_grad_idx; + for (size_t idx = 0; idx < aux_var_set.at(kGrad).size(); ++idx) { + auto &grad = aux_var_set.at(kGrad).at(idx); + if (!opt_grad_set.count(grad)) { + new_grad_idx.emplace_back(idx); } } - // NOTE(zcd): the gradient of kParamsAndGrads may be different with the - // kGrad. - if (same_grad_num == aux_var_set.at(kGrad).size()) { + // NOTE(zcd): the gradient of kParamsAndDenseGrads may be different + // with the kGrad. The gradients of kParamsAndDenseGrads is + // collected during backward stage, but in optimization state, the + // some gradient's name maybe changed. + if (new_grad_idx.size() == 0) { if (!result.Has(details::kFusedGrads)) { PADDLE_THROW( - "The alloc_continuous_space_for_grad_pass should be called before " - "this pass."); + "The coalesce_grad_tensor_pass should " + "be called before this pass."); } auto &fused_grad = result.Get(details::kFusedGrads); PADDLE_ENFORCE_NE(fused_grad.size(), 0, @@ -115,136 +133,146 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { // Sort the parameters and auxiliary variables according // to parameters' name to make variables' name correspond correctly. - SortParametersAndAuxVars(params_grads, &aux_var_set, &opt_ops); + SortParametersAndAuxVars(params_and_dense_grads, &aux_var_set, + &opt_nodes); grad_fused = true; + } else { + VLOG(10) << "The number of new gradients is " << new_grad_idx.size(); + if (new_grad_idx.size() == 1) return; + // NOTE(zcd): If the gradients of backward stage and optimization stage + // have diff, Only take care of the the gradient of optimization stage. + GradientsFilter(new_grad_idx, &opt_nodes, &aux_var_set); } } // Step 4: Alloc continuous space for Parameters and AuxiliaryVar(e.g. - // Moment1, Moment2, Beta1Pow, Beta2Pow) of all the optimizer ops separately. - aux_var_names.pop_back(); + // Moment1, Moment2, Beta1Pow, Beta2Pow) of all the optimizer ops + // separately. 
if (!grad_fused) { - InitFusedGradsAndAllocSpaceForGrads( - places, local_scopes, aux_var_set.at(kParam), aux_var_set.at(kGrad), - fused_vars_name.at(kGrad), &result); + InitFusedGradsAndAllocSpaceForGrads(aux_var_set.at(kParam), + aux_var_set.at(kGrad), + fused_vars_name.at(kGrad), &result); } - InitFusedVarsAndAllocSpaceForVars(places, local_scopes, aux_var_names, - aux_var_set, fused_vars_name); + aux_var_names.pop_back(); + InitFusedVarsAndAllocSpaceForVars(aux_var_names, aux_var_set, fused_vars_name, + &result); // Step 5: Fuse optimizer Ops and Scale Ops - FuseOptimizerOps(aux_var_set, fused_vars_name, opt_ops, &result); + FuseOptimizerOps(aux_var_set, fused_vars_name, opt_nodes, &result); // Step 6: Remove optimizer Ops - for (auto &opt_op : opt_ops) { + for (auto &opt_op : opt_nodes) { graph->RemoveNode(opt_op); } } +void FuseOptimizerOpPass::GradientsFilter( + const std::vector &new_grad_idx, std::vector *opt_nodes, + std::unordered_map> *aux_var_set) + const { + for (auto &aux_vars : *aux_var_set) { + std::vector sorted_vars; + sorted_vars.reserve(aux_vars.second.size()); + for (size_t i : new_grad_idx) { + sorted_vars.emplace_back(aux_vars.second.at(i)); + } + std::swap(aux_vars.second, sorted_vars); + if (VLOG_IS_ON(6)) { + std::stringstream out; + for (auto &var_name : aux_vars.second) { + out << var_name << " "; + } + VLOG(6) << aux_vars.first << ": " << out.str(); + } + } + std::vector sorted_ops; + for (size_t i : new_grad_idx) { + sorted_ops.emplace_back(opt_nodes->at(i)); + } + std::swap(*opt_nodes, sorted_ops); +} + void FuseOptimizerOpPass::InitFusedGradsAndAllocSpaceForGrads( - const std::vector &places, - const std::vector &local_scopes, const std::vector ¶ms, const std::vector &grads, const std::string &fused_grad_name, ir::Graph *result) const { - // Get Var Nodes - std::unordered_map vars; - for (ir::Node *node : result->Nodes()) { - if (node->IsVar() && node->Var()) { - // Note: The graph may have the same name node. For example, parameter - // is the input of operator and it also is the output of optimizer; - vars.emplace(node->Var()->Name(), node); - } - } - + auto vars_info = GetVarInfo(*result); // Set Gradients as Persistable to prevent this var becoming reusable. 
for (auto &grad_var_name : grads) { - auto iter = vars.find(grad_var_name); - PADDLE_ENFORCE(iter != vars.end()); - PADDLE_ENFORCE_NOT_NULL(iter->second->Var()); - PADDLE_ENFORCE(iter->second->Var()->GetType() == proto::VarType::LOD_TENSOR, + auto iter = vars_info.find(grad_var_name); + PADDLE_ENFORCE(iter != vars_info.end()); + PADDLE_ENFORCE(!iter->second.empty()); + PADDLE_ENFORCE_NOT_NULL(iter->second.front()->Var()); + PADDLE_ENFORCE(IsLoDTensorType(iter->second.front()->Var()->GetType()), "Currently the gradient type only should be LoDTensor when " "fusing optimizer ops."); - iter->second->Var()->SetPersistable(true); - } - - // Init Grads - for (auto it = local_scopes.rbegin(); it != local_scopes.rend(); ++it) { - auto &scope = *it; - VLOG(6) << "Init: " << fused_grad_name; - PADDLE_ENFORCE(scope->FindVar(fused_grad_name) == nullptr, - "%s has existed in scope.", fused_grad_name); - scope->Var(fused_grad_name)->GetMutable(); - for (auto &grad_var_name : grads) { - auto iter = vars.find(grad_var_name); - PADDLE_ENFORCE(iter != vars.end()); - PADDLE_ENFORCE_NOT_NULL(iter->second->Var()); - scope->Var(grad_var_name)->GetMutable(); + for (auto var : iter->second) { + var->Var()->SetPersistable(true); } } + // Define Ops - ProgramDesc program_desc; + result->Get(details::kProgramDescs).emplace_back(); + ProgramDesc &program_desc = + result->Get(details::kProgramDescs).back(); auto *global_block = program_desc.MutableBlock(0); AppendAllocContinuousSpace(params, grads, fused_grad_name, global_block, false, false); - // Run Ops - RunInitOps(places, local_scopes, *global_block); +} + +std::unordered_map> +FuseOptimizerOpPass::GetVarInfo(const Graph &result) const { + std::unordered_map> vars; + for (Node *node : result.Nodes()) { + if (node->IsVar() && node->Var()) { + // Note: The graph may have the same name node. For example, parameter + // is the input of operator and it also is the output of optimizer; + vars[node->Var()->Name()].emplace_back(node); + } + } + return vars; +} + +bool FuseOptimizerOpPass::IsLoDTensorType( + const proto::VarType::Type &type) const { + // Current only support LOD_TENSOR. 
+ return type == proto::VarType::LOD_TENSOR; +} + +proto::VarType::Type FuseOptimizerOpPass::GetTypeOfVar( + const std::unordered_map> &var_nodes, + const std::string &name) const { + auto grad_iter = var_nodes.find(name); + PADDLE_ENFORCE(grad_iter != var_nodes.end()); + PADDLE_ENFORCE(grad_iter->second.size() > 0); + PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var()); + return grad_iter->second.front()->Var()->GetType(); } void FuseOptimizerOpPass::InitFusedVarsAndAllocSpaceForVars( - const std::vector &places, - const std::vector &local_scopes, const std::vector &aux_var_names, const std::unordered_map> &aux_var_set, - const std::unordered_map &fused_vars_name) const { - // Init Vars - for (auto &var_name : aux_var_names) { - auto &fused_var_name = fused_vars_name.at(var_name); - InitVars(local_scopes, fused_var_name); - } + const std::unordered_map &fused_vars_name, + ir::Graph *result) const { // Define Ops - ProgramDesc program_desc; + result->Get(details::kProgramDescs).emplace_back(); + ProgramDesc &program_desc = + result->Get(details::kProgramDescs).back(); auto *global_block = program_desc.MutableBlock(0); for (auto &var_name : aux_var_names) { AppendAllocContinuousSpace( aux_var_set.at(var_name), aux_var_set.at(var_name), fused_vars_name.at(var_name), global_block, true); } - // Run Ops - RunInitOps(places, local_scopes, *global_block); -} - -void FuseOptimizerOpPass::RunInitOps(const std::vector &places, - const std::vector &local_scopes, - const BlockDesc &global_block) const { - for (size_t i = 0; i < local_scopes.size(); ++i) { - for (auto &op_desc : global_block.AllOps()) { - auto op = OpRegistry::CreateOp(*op_desc); - op->Run(*local_scopes[i], places[i]); - } - } -} - -void FuseOptimizerOpPass::InitVars(const std::vector &local_scopes, - const std::string &fused_var_name) const { - // Alloc parameters and auxiliary vars in the respective scope. 
- size_t idx = local_scopes.size(); - for (auto iter = local_scopes.rbegin(); iter != local_scopes.rend(); - ++iter, --idx) { - auto &scope = *iter; - VLOG(6) << "Init: " << fused_var_name; - PADDLE_ENFORCE(scope->FindVar(fused_var_name) == nullptr, - "%s has exist in scope[%d]", fused_var_name, idx); - scope->Var(fused_var_name)->GetMutable(); - } } void FuseOptimizerOpPass::SortParametersAndAuxVars( const std::vector> ¶ms_grads, std::unordered_map> *aux_vars_set, std::vector *ops) const { - PADDLE_ENFORCE_NE(aux_vars_set->count("Param"), static_cast(0)); - auto ¶m_vec = aux_vars_set->at("Param"); + PADDLE_ENFORCE_NE(aux_vars_set->count(kParam), static_cast(0)); + auto ¶m_vec = aux_vars_set->at(kParam); std::vector param_sort_idx; param_sort_idx.reserve(param_vec.size()); @@ -264,11 +292,13 @@ void FuseOptimizerOpPass::SortParametersAndAuxVars( } std::swap(aux_vars.second, sorted_vars); - std::stringstream out; - for (auto &var_name : aux_vars.second) { - out << var_name << " "; + if (VLOG_IS_ON(6)) { + std::stringstream out; + for (auto &var_name : aux_vars.second) { + out << var_name << " "; + } + VLOG(6) << aux_vars.first << ": " << out.str(); } - VLOG(6) << aux_vars.first << ": " << out.str(); } std::vector sorted_ops; @@ -280,21 +310,19 @@ void FuseOptimizerOpPass::SortParametersAndAuxVars( } void FuseOptimizerOpPass::GetSpecifiedOpsAndVars( - const std::string &op_type, const std::vector &aux_vars_name, - ir::Node *node, std::vector *ops, + const std::vector &aux_vars_name, + const std::vector &opt_nodes, std::unordered_map> *aux_args_name) const { - if (node->Op()->Type() != op_type) return; - - std::stringstream out; - for (auto &var_n : aux_vars_name) { - auto arg_names = node->Op()->Input(var_n); - PADDLE_ENFORCE_EQ(arg_names.size(), static_cast(1)); - (*aux_args_name)[var_n].emplace_back(arg_names[0]); - out << var_n << ", " << arg_names[0] << "; "; + for (auto &node : opt_nodes) { + std::stringstream out; + for (auto &var_n : aux_vars_name) { + auto arg_names = node->Op()->Input(var_n); + PADDLE_ENFORCE_EQ(arg_names.size(), static_cast(1)); + (*aux_args_name)[var_n].emplace_back(arg_names[0]); + out << var_n << ", " << arg_names[0] << "; "; + } } - VLOG(7) << out.str(); - ops->emplace_back(node); } void FuseOptimizerOpPass::AppendAllocContinuousSpace( @@ -302,7 +330,7 @@ void FuseOptimizerOpPass::AppendAllocContinuousSpace( const std::vector &out_args, const std::string &fused_out_arg, BlockDesc *global_block, bool copy_data, bool check_name) const { auto op_desc = global_block->AppendOp(); - op_desc->SetType("alloc_continuous_space"); + op_desc->SetType("coalesce_tensor"); op_desc->SetInput("Input", in_args); op_desc->SetOutput("Output", out_args); op_desc->SetOutput("FusedOutput", {fused_out_arg}); @@ -311,10 +339,10 @@ void FuseOptimizerOpPass::AppendAllocContinuousSpace( } void FuseOptimizerOpPass::InserInputAndOutputForOptOps( - const std::vector &opt_ops, ir::Node *opt_node) const { + const std::vector &opt_nodes, ir::Node *opt_node) const { std::unordered_set inputs; std::unordered_set outputs; - for (auto opt_op : opt_ops) { + for (auto opt_op : opt_nodes) { // set inputs inputs.insert(opt_op->inputs.begin(), opt_op->inputs.end()); for (auto &input : opt_op->inputs) { diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h index 6271ca9acb..0432d8c473 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h +++ 
b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h @@ -55,8 +55,8 @@ class FuseOptimizerOpPass : public ir::Pass { const std::vector &adam_ops, ir::Graph *graph) const = 0; void GetSpecifiedOpsAndVars( - const std::string &op_type, const std::vector &aux_vars_name, - ir::Node *node, std::vector *ops, + const std::vector &aux_vars_name, + const std::vector &opt_nodes, std::unordered_map> *aux_args_name) const; @@ -67,27 +67,30 @@ class FuseOptimizerOpPass : public ir::Pass { bool check_name = true) const; void InitFusedGradsAndAllocSpaceForGrads( - const std::vector &places, - const std::vector &local_scopes, const std::vector ¶ms, const std::vector &grads, const std::string &fused_grad_name, ir::Graph *result) const; void InitFusedVarsAndAllocSpaceForVars( - const std::vector &places, - const std::vector &local_scopes, const std::vector &aux_var_names, const std::unordered_map> &aux_var_set, - const std::unordered_map &fused_vars_name) - const; + const std::unordered_map &fused_vars_name, + ir::Graph *result) const; + + std::unordered_map> GetVarInfo( + const Graph &result) const; + + proto::VarType::Type GetTypeOfVar( + const std::unordered_map> &var_nodes, + const std::string &name) const; - void RunInitOps(const std::vector &places, - const std::vector &local_scopes, - const BlockDesc &global_block) const; + void GradientsFilter(const std::vector &new_grad_idx, + std::vector *opt_nodes, + std::unordered_map> + *aux_var_set) const; - void InitVars(const std::vector &local_scopes, - const std::string &fused_var_name) const; + bool IsLoDTensorType(const proto::VarType::Type &type) const; }; } // namespace ir diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc index 20cf8a58ce..b397216f0b 100644 --- a/paddle/fluid/framework/ir/graph_helper.cc +++ b/paddle/fluid/framework/ir/graph_helper.cc @@ -108,8 +108,6 @@ bool VarDescIsConsistency(const Graph &graph) { var_name2node_set; for (ir::Node *node : graph.Nodes()) { if (node->IsVar() && node->Var()) { - // Note: The graph may have the same name node. For example, parameter - // is the input of operator and it also is the output of optimizer; var_name2node_set[node->Var()->Name()].emplace(node); } } diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h index 59bacccea4..074ad320fb 100644 --- a/paddle/fluid/framework/ir/graph_helper.h +++ b/paddle/fluid/framework/ir/graph_helper.h @@ -38,6 +38,10 @@ struct NodeComp { bool HasCircle(const Graph &graph); // Check if the var desc of node is consistency. +// The graph may have the same name node, for example, parameter +// is the input of operator and it also is the output of optimizer. +// For the persistable variable, the var_desc of the nodes with +// the same node name should be equal. 
bool VarDescIsConsistency(const Graph &graph); // Find All Circles for debugging, diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc index 77173b5866..d0afebcb53 100644 --- a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc @@ -38,7 +38,7 @@ class FuseAllReduceOpPass : public ir::Pass { #endif auto ¶ms_grads = - result.Get(details::kParamsAndGrads); + result.Get(details::kParamsAndDenseGrads); size_t num_of_all_reduce = params_grads.size(); std::unordered_set grads; grads.reserve(num_of_all_reduce); @@ -60,8 +60,8 @@ class FuseAllReduceOpPass : public ir::Pass { "it is not supported currently."); VLOG(10) << "Insert fused_all_reduce"; - auto &group_params_grads = - graph->Get(details::kGroupParamsAndGrads); + auto &group_params_grads = graph->Get( + details::kGroupParamsAndDenseGrads); for (auto &group_p_g : group_params_grads) { size_t group_size = group_p_g.size(); diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h index 2e1771360d..276e6a5bf5 100644 --- a/paddle/fluid/framework/ir/node.h +++ b/paddle/fluid/framework/ir/node.h @@ -49,7 +49,7 @@ class Node { public: virtual ~Node() { if (!wrapper_.empty()) { - VLOG(4) << "ir::Node deleting a wrapper node " << Name(); + VLOG(10) << "ir::Node deleting a wrapper node " << Name(); wrapper_deleter_(); } } diff --git a/paddle/fluid/framework/ir/pass.cc b/paddle/fluid/framework/ir/pass.cc index b51e118772..ca8e829900 100644 --- a/paddle/fluid/framework/ir/pass.cc +++ b/paddle/fluid/framework/ir/pass.cc @@ -33,16 +33,12 @@ Graph* Pass::Apply(Graph* graph) const { PADDLE_ENFORCE(graph->Has(attr), "Required graph atrribute %s not set.", attr); } - auto* native_graph = graph; ApplyImpl(graph); // TODO(panyx0718): Add more verifications. PADDLE_ENFORCE(!HasCircle(*graph), "Illegal Pass. Generated graph shouldn't has cycle."); PADDLE_ENFORCE(VarDescIsConsistency(*graph), "The VarDescs of persistable variable are not consistency."); - PADDLE_ENFORCE(graph == native_graph, - "Pass::Apply() cannot delete the passed graph and shouldn't " - "return a new graph.(For the need of pybind11)"); applied_ = true; return graph; } diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 41658dec85..98ff3ea146 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -88,6 +88,7 @@ set(COMMON_OP_DEPS ${COMMON_OP_DEPS} sequence2batch lstm_compute matrix_bit_code if (WITH_GPU) set(COMMON_OP_DEPS ${COMMON_OP_DEPS} depthwise_conv prelu) endif() +set(COMMON_OP_DEPS ${COMMON_OP_DEPS} device_memory_aligment) # FIXME(typhoonzero): operator deps may not needed. 
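Offsets inside the fused buffer are rounded up with platform::Alignment (added below in device_memory_aligment.cc), which uses the device's minimum chunk size: CpuMinChunkSize() on CPU and GpuMinChunkSize() on GPU. A small worked sketch of that rounding, taking the 256-byte GPU chunk size from the removed fused_all_reduce_op_handle.cc comment as an assumption:

def alignment(size_in_bytes, min_chunk_size):
    # Mirrors platform::Alignment: round the chunk up to a multiple of min_chunk_size.
    remaining = size_in_bytes % min_chunk_size
    return size_in_bytes if remaining == 0 else size_in_bytes + (min_chunk_size - remaining)

# A float32 gradient with 1000 elements on GPU (assumed 256-byte minimum chunk):
print(alignment(1000 * 4, 256))   # 4000 bytes rounds up to 4096, where the next tensor starts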
# op_library(lod_tensor_to_array_op DEPS lod_rank_table_op) diff --git a/paddle/fluid/operators/alloc_continuous_space_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc similarity index 78% rename from paddle/fluid/operators/alloc_continuous_space_op.cc rename to paddle/fluid/operators/coalesce_tensor_op.cc index 59221c2e0c..ce425e7e69 100644 --- a/paddle/fluid/operators/alloc_continuous_space_op.cc +++ b/paddle/fluid/operators/coalesce_tensor_op.cc @@ -18,6 +18,7 @@ #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/device_memory_aligment.h" namespace paddle { namespace operators { @@ -26,7 +27,7 @@ static framework::proto::VarType::Type kDefaultDtype = framework::proto::VarType::Type::VarType_Type_BOOL; template -class AllocContinuousSpaceKernel : public framework::OpKernel { +class CoalesceTensorOp : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { auto &in_var_names = context.Inputs("Input"); @@ -86,8 +87,8 @@ class AllocContinuousSpaceKernel : public framework::OpKernel { framework::TensorCopy(*in_tensors[i], context.GetPlace(), dev_ctx, &sub_tensor); - offset += - Alignment(len * size_of_dtype, context.GetPlace()) / size_of_dtype; + offset += platform::Alignment(len * size_of_dtype, context.GetPlace()) / + size_of_dtype; } } else if (context.Attr("set_constant")) { math::SetConstant set_constant; @@ -106,7 +107,8 @@ class AllocContinuousSpaceKernel : public framework::OpKernel { ->ShareDataWith(fused_tensor->Slice( static_cast(offset), static_cast(offset + len))) .Resize(dim); - len = Alignment(len * size_of_dtype, context.GetPlace()) / size_of_dtype; + len = platform::Alignment(len * size_of_dtype, context.GetPlace()) / + size_of_dtype; offset += len; ss << "output(" << out_var_names[i] << ") dim:(" << dim << ")" << " address: " << out_tensors[i]->data() << ", "; @@ -115,19 +117,6 @@ class AllocContinuousSpaceKernel : public framework::OpKernel { } private: - // Note(zcd): Addresses should be aligned, otherwise, the results may have - // diff. - size_t Alignment(size_t size, const platform::Place &place) const { - // Allow to allocate the minimum chunk size is 4 KB. - size_t alignment = 1 << 12; - if (platform::is_gpu_place(place)) { - // Allow to allocate the minimum chunk size is 256 B. - alignment = 1 << 8; - } - size_t remaining = size % alignment; - return remaining == 0 ? size : size + (alignment - remaining); - } - void GetMemSizeAndDtype( const std::vector &lod_tensors, const std::vector var_names, size_t *numel, @@ -156,7 +145,8 @@ class AllocContinuousSpaceKernel : public framework::OpKernel { PADDLE_ENFORCE_GT(size, 0); ss << "input(" << var_names[i] << ") dim:(" << lod_tensors[i]->dims() << "), "; - *numel += Alignment(static_cast(size) * size_of_dtype, place) / + *numel += platform::Alignment(static_cast(size) * size_of_dtype, + place) / size_of_dtype; } @@ -176,17 +166,17 @@ class AllocContinuousSpaceOpMaker : public framework::OpProtoAndCheckerMaker { void Make() override { AddInput("Input", "(vector) The input tensors of" - " alloc_continuous_space operator.") + " coalesce_tensor operator.") .AsDuplicable(); AddOutput("Output", "(vector) The output " - "tensors of alloc_continuous_space operator. And the address " + "tensors of coalesce_tensor operator. 
And the address " "of output tensors are continuous, they are sliced from the " "tensor of FusedOutput.") .AsDuplicable(); AddOutput("FusedOutput", "(LoDTensor) The output tensor " - "of alloc_continuous_space operator. And the tensors of" + "of coalesce_tensor operator. And the tensors of" " Output is sliced from the tensor of FusedOutput."); AddAttr("copy_data", "Whether to copy the Input value to Output.") .SetDefault(false); @@ -204,7 +194,7 @@ class AllocContinuousSpaceOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( AllocContinuousSpace Operator. -alloc_continuous_space is used to make the address of Output +coalesce_tensor is used to make the address of Output continuous according to the Input. This Op will alloc a big tensor according to the tensors of Input, the dtype is the same with those input tensors, the size is the sum of those input tensors' numel, and the dim of the big @@ -213,7 +203,7 @@ The tensors of Output are sliced from the tensor of FusedOutput. Note that, the dtype of Input should be the same, and the dim of Input and Output should equal. The tensors of Input and Output could be the same or different. And -alloc_continuous_space allows copying the value of Input to Output, or +coalesce_tensor allows copying the value of Input to Output, or setting the Output with a constant value. )DOC"); @@ -223,27 +213,22 @@ setting the Output with a constant value. } // namespace operators } // namespace paddle -REGISTER_OPERATOR(alloc_continuous_space, - paddle::operators::AllocContinuousSpaceOp, +REGISTER_OPERATOR(coalesce_tensor, paddle::operators::AllocContinuousSpaceOp, paddle::operators::AllocContinuousSpaceOpMaker); namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CPU_KERNEL( - alloc_continuous_space, - ops::AllocContinuousSpaceKernel, - ops::AllocContinuousSpaceKernel, - ops::AllocContinuousSpaceKernel, - ops::AllocContinuousSpaceKernel); + coalesce_tensor, + ops::CoalesceTensorOp, + ops::CoalesceTensorOp, + ops::CoalesceTensorOp, + ops::CoalesceTensorOp); #ifdef PADDLE_WITH_CUDA REGISTER_OP_CUDA_KERNEL( - alloc_continuous_space, - ops::AllocContinuousSpaceKernel, - ops::AllocContinuousSpaceKernel, - ops::AllocContinuousSpaceKernel, - ops::AllocContinuousSpaceKernel); + coalesce_tensor, + ops::CoalesceTensorOp, + ops::CoalesceTensorOp, + ops::CoalesceTensorOp, + ops::CoalesceTensorOp); #endif diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index da2c51daa8..575eed355d 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -102,17 +102,17 @@ cc_test(lodtensor_printer_test SRCS lodtensor_printer_test.cc DEPS lodtensor_pri cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS}) if(WITH_GPU) nv_library(profiler SRCS profiler.cc profiler.cu DEPS device_tracer gpu_info enforce) + nv_test(cuda_helper_test SRCS cuda_helper_test.cu) + nv_library(device_memory_aligment SRCS device_memory_aligment.cc DEPS cpu_info gpu_info place) else() - cc_library(profiler SRCS profiler.cc DEPS device_tracer enforce) + cc_library(profiler SRCS profiler.cc DEPS device_tracer enforce) + cc_library(device_memory_aligment SRCS device_memory_aligment.cc DEPS cpu_info place) endif() cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) nv_test(float16_gpu_test SRCS float16_test.cu DEPS lod_tensor) cc_test(float16_test SRCS float16_test.cc DEPS lod_tensor) -IF(WITH_GPU) - nv_test(cuda_helper_test SRCS 
cuda_helper_test.cu) -ENDIF() nv_library(cuda_device_guard SRCS cuda_device_guard.cc DEPS gpu_info) if(WITH_GPU) diff --git a/paddle/fluid/platform/device_memory_aligment.cc b/paddle/fluid/platform/device_memory_aligment.cc new file mode 100644 index 0000000000..7b901856da --- /dev/null +++ b/paddle/fluid/platform/device_memory_aligment.cc @@ -0,0 +1,34 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/device_memory_aligment.h" + +namespace paddle { +namespace platform { +size_t Alignment(size_t size, const platform::Place &place) { + size_t alignment = 1024; + if (platform::is_cpu_place(place)) { + alignment = CpuMinChunkSize(); + } else { +#ifdef PADDLE_WITH_CUDA + alignment = GpuMinChunkSize(); +#else + PADDLE_THROW("Fluid is not compiled with CUDA"); +#endif + } + size_t remaining = size % alignment; + return remaining == 0 ? size : size + (alignment - remaining); +} +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/device_memory_aligment.h b/paddle/fluid/platform/device_memory_aligment.h new file mode 100644 index 0000000000..2c19a2b106 --- /dev/null +++ b/paddle/fluid/platform/device_memory_aligment.h @@ -0,0 +1,27 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/place.h" +#ifdef PADDLE_WITH_CUDA +#include "paddle/fluid/platform/gpu_info.h" +#endif + +namespace paddle { +namespace platform { +size_t Alignment(size_t size, const platform::Place &place); +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 0232a5020b..6dd6c842f0 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -27,7 +27,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/garbage_collector.h" -#include "paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h" +#include "paddle/fluid/framework/ir/coalesce_grad_tensor_pass.h" #include "paddle/fluid/framework/ir/pass_builder.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py index 816f2b7b6b..72d7cbec07 100644 --- a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py +++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py @@ -38,6 +38,7 @@ class TestParallelExecutorBase(unittest.TestCase): batch_size=None, allow_op_delay=False, feed_dict=None, + get_data_from_feeder=None, seed=None, use_parallel_executor=True, use_reduce=False, @@ -74,6 +75,10 @@ class TestParallelExecutorBase(unittest.TestCase): if memory_opt: fluid.memory_optimize(main) + if get_data_from_feeder is not None: + assert feed_dict is None + feed_dict = get_data_from_feeder() + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) exe.run(startup) @@ -81,6 +86,7 @@ class TestParallelExecutorBase(unittest.TestCase): exec_strategy.allow_op_delay = allow_op_delay if use_fast_executor: exec_strategy.use_experimental_executor = True + build_strategy = fluid.BuildStrategy() build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \ if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce diff --git a/python/paddle/fluid/tests/unittests/simple_nets.py b/python/paddle/fluid/tests/unittests/simple_nets.py index 20ec6c34c3..959042a246 100644 --- a/python/paddle/fluid/tests/unittests/simple_nets.py +++ b/python/paddle/fluid/tests/unittests/simple_nets.py @@ -55,6 +55,34 @@ def fc_with_batchnorm(use_feed=None): return loss +def bow_net(use_feed, + dict_dim, + is_sparse=False, + emb_dim=128, + hid_dim=128, + hid_dim2=96, + class_dim=2): + """ + BOW net + This model is from https://github.com/PaddlePaddle/models: + fluid/PaddleNLP/text_classification/nets.py + """ + data = fluid.layers.data( + name="words", shape=[1], dtype="int64", lod_level=1) + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + emb = fluid.layers.embedding( + input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim]) + bow = fluid.layers.sequence_pool(input=emb, pool_type='sum') + bow_tanh = fluid.layers.tanh(bow) + fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") + fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") + prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") + cost = fluid.layers.cross_entropy(input=prediction, label=label) + avg_cost = fluid.layers.mean(x=cost) + + return avg_cost + + def init_data(batch_size=32, img_shape=[784], label_range=9): np.random.seed(5) assert isinstance(img_shape, list) diff --git a/python/paddle/fluid/tests/unittests/test_alloc_continuous_space_op.py b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py similarity index 91% rename from python/paddle/fluid/tests/unittests/test_alloc_continuous_space_op.py rename to python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py index 29eb0166b7..c04abe658e 100644 --- a/python/paddle/fluid/tests/unittests/test_alloc_continuous_space_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_coalesce_tensor_op.py @@ -24,7 +24,7 @@ alignment = 256 class TestAllocContinuousSpace(OpTest): def setUp(self): - self.op_type = "alloc_continuous_space" + self.op_type = "coalesce_tensor" self.dtype = np.float32 attrs = self.init_attr() self.copy_data = attrs["copy_data"] @@ -64,14 +64,13 @@ class TestAllocContinuousSpace(OpTest): out[0:length] = input[1].flatten() inputs.append(out) - alloc_continuous_space_var = np.concatenate([input for input in inputs]) + coalesce_tensor_var = np.concatenate([input for input in inputs]) if set_constant: - alloc_continuous_space_var = np.ones( - (len(alloc_continuous_space_var))) * constant + coalesce_tensor_var = np.ones((len(coalesce_tensor_var))) * constant outputs = [(out[0], np.ones(out[1].shape).astype(self.dtype) * constant) for out in outputs] - return outputs, alloc_continuous_space_var + return outputs, coalesce_tensor_var def test_check_output(self): if core.is_compiled_with_cuda(): diff --git a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py index 0990045a8f..cd76b45b24 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_all_reduce_pass.py @@ -11,71 +11,122 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from simple_nets import simple_fc_net, fc_with_batchnorm, init_data +from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net +from fake_reader import fake_imdb_reader from parallel_executor_test_base import TestParallelExecutorBase import paddle.fluid as fluid import paddle.fluid.core as core -import numpy as np +from functools import partial import paddle -import paddle.dataset.mnist as mnist import unittest import os -class TestMNIST(TestParallelExecutorBase): +class TestFuseAllReduceOpsBase(TestParallelExecutorBase): @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) - def _init_data(self, random=True): - np.random.seed(5) - if random: - img = np.random.random(size=[32, 784]).astype(np.float32) - else: - img = np.ones(shape=[32, 784], dtype='float32') - label = np.ones(shape=[32, 1], dtype='int64') - return img, label - - def _compare_fuse_all_reduce_ops(self, model, use_cuda): + def compare_fuse_all_reduce_ops(self, + model, + use_cuda, + init_feed_dicta=None, + get_data_from_feeder=None, + optimizer=None, + fuse_all_optimizer_ops=False): if use_cuda and not core.is_compiled_with_cuda(): return - img, label = init_data() - def _optimizer(learning_rate=1e-6): - optimizer = fluid.optimizer.SGD( - learning_rate=learning_rate, - regularization=fluid.regularizer.L2Decay(1e-6)) - return optimizer + feed_dict_data = None + if init_feed_dicta is not None: + img, label = init_feed_dicta() + feed_dict_data = {"image": img, "label": label} not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence( model, - feed_dict={"image": img, - "label": label}, + feed_dict=feed_dict_data, + get_data_from_feeder=get_data_from_feeder, use_cuda=use_cuda, fuse_all_reduce_ops=False, + fuse_all_optimizer_ops=fuse_all_optimizer_ops, memory_opt=False, - optimizer=_optimizer) + optimizer=optimizer) fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence( model, - feed_dict={"image": img, - "label": label}, + feed_dict=feed_dict_data, + 
get_data_from_feeder=get_data_from_feeder, use_cuda=use_cuda, fuse_all_reduce_ops=True, + fuse_all_optimizer_ops=fuse_all_optimizer_ops, memory_opt=False, - optimizer=_optimizer) + optimizer=optimizer) for loss in zip(not_fuse_op_first_loss, fuse_op_first_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) - def test_simple_fc_with_fuse_op(self): - self._compare_fuse_all_reduce_ops(simple_fc_net, True) - self._compare_fuse_all_reduce_ops(simple_fc_net, False) + def optimizer(self, learning_rate=1e-3): + optimizer = fluid.optimizer.SGD( + learning_rate=learning_rate, + regularization=fluid.regularizer.L2Decay(1e-3)) + return optimizer + + +class TestFuseAllReduceOps(TestFuseAllReduceOpsBase): + def _decorate_compare_fused_all_reduce(self, model, use_cuda): + self.compare_fuse_all_reduce_ops( + model, + use_cuda, + init_feed_dicta=init_data, + optimizer=self.optimizer, + fuse_all_optimizer_ops=True) + + def test_simple_fc_with_fuse_all_reduce(self): + self._decorate_compare_fused_all_reduce(simple_fc_net, True) + self._decorate_compare_fused_all_reduce(simple_fc_net, False) + + def test_batchnorm_fc_with_fuse_all_reduce(self): + self._decorate_compare_fused_all_reduce(fc_with_batchnorm, True) + self._decorate_compare_fused_all_reduce(fc_with_batchnorm, False) + + +class TestFuseAllReduceOpsAndOptiOps(TestFuseAllReduceOps): + def _decorate_compare_fused_all_reduce(self, model, use_cuda): + self.compare_fuse_all_reduce_ops( + model, + use_cuda, + init_feed_dicta=init_data, + optimizer=self.optimizer, + fuse_all_optimizer_ops=True) + + +class TestFuseAllReduceOpsWithSparseGrad(TestFuseAllReduceOpsBase): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + cls.word_dict_len = 5147 + batch_size = 64 + reader = fake_imdb_reader(cls.word_dict_len, batch_size * 100) + reader = paddle.batch(reader, batch_size=batch_size)() + cls.train_data = next(reader) + + def get_data_from_feeder(self): + place = fluid.CPUPlace() + feeder = fluid.DataFeeder(feed_list=["words", "label"], place=place) + return feeder.feed(self.train_data) + + def _decorate_compare_fused_all_reduce(self, model, use_cuda): + self.compare_fuse_all_reduce_ops( + model, + use_cuda, + get_data_from_feeder=self.get_data_from_feeder, + optimizer=self.optimizer) - def test_batchnorm_fc_with_fuse_op(self): - self._compare_fuse_all_reduce_ops(fc_with_batchnorm, True) - self._compare_fuse_all_reduce_ops(fc_with_batchnorm, False) + def test_simple_bow_net_with_fuse_all_reduce(self): + model = partial(bow_net, dict_dim=self.word_dict_len, is_sparse=True) + self._decorate_compare_fused_all_reduce(model, True) + self._decorate_compare_fused_all_reduce(model, False) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py index b92324d8a7..6cc1a81d03 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_optimizer_pass.py @@ -11,30 +11,39 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from simple_nets import simple_fc_net, fc_with_batchnorm, init_data +from simple_nets import simple_fc_net, fc_with_batchnorm, init_data, bow_net +from fake_reader import fake_imdb_reader from parallel_executor_test_base import TestParallelExecutorBase +from functools import partial +import paddle import paddle.fluid as fluid import paddle.fluid.core as core import unittest import os -class TestFuseAdamOps(TestParallelExecutorBase): +class TestFuseOptimizationOps(TestParallelExecutorBase): @classmethod def setUpClass(cls): os.environ['CPU_NUM'] = str(4) + def _get_feed_dict(self): + img, label = init_data() + return {"image": img, "label": label} + def _compare_fused_optimizer_ops(self, model, use_cuda, + feed_dict=None, + get_data_from_feeder=None, optimizer=fluid.optimizer.Adam): if use_cuda and not core.is_compiled_with_cuda(): return - img, label = init_data() - feed_dict = {"image": img, "label": label} + not_fuse_op_first_loss, not_fuse_op_last_loss = self.check_network_convergence( model, feed_dict=feed_dict, + get_data_from_feeder=get_data_from_feeder, use_cuda=use_cuda, fuse_all_optimizer_ops=False, memory_opt=False, # avoid the gradient's name changed in Python side. @@ -42,6 +51,7 @@ class TestFuseAdamOps(TestParallelExecutorBase): fuse_op_first_loss, fuse_op_last_loss = self.check_network_convergence( model, feed_dict=feed_dict, + get_data_from_feeder=get_data_from_feeder, use_cuda=use_cuda, fuse_all_optimizer_ops=True, memory_opt=False, # avoid the gradient's name changed in Python side. @@ -52,48 +62,84 @@ class TestFuseAdamOps(TestParallelExecutorBase): for loss in zip(not_fuse_op_last_loss, fuse_op_last_loss): self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) + def _decorate_compare_fused_optimizer_ops(self, model, use_cuda, optimizer): + self._compare_fused_optimizer_ops( + model, + use_cuda, + feed_dict=self._get_feed_dict(), + optimizer=optimizer) + + +class TestFuseAdamOps(TestFuseOptimizationOps): + def optimizer(self, learning_rate=1e-4): + return fluid.optimizer.Adam(learning_rate=learning_rate) + def test_simple_fc_with_fuse_op(self): - self._compare_fused_optimizer_ops(simple_fc_net, True) - self._compare_fused_optimizer_ops(simple_fc_net, False) + self._decorate_compare_fused_optimizer_ops( + simple_fc_net, True, optimizer=self.optimizer) + self._decorate_compare_fused_optimizer_ops( + simple_fc_net, False, optimizer=self.optimizer) def test_batchnorm_fc_with_fuse_op(self): - self._compare_fused_optimizer_ops(fc_with_batchnorm, True) - self._compare_fused_optimizer_ops(fc_with_batchnorm, False) + self._decorate_compare_fused_optimizer_ops( + fc_with_batchnorm, True, optimizer=self.optimizer) + self._decorate_compare_fused_optimizer_ops( + fc_with_batchnorm, False, optimizer=self.optimizer) class TestFuseSGDOps(TestFuseAdamOps): - def sgd_optimizer(self, learning_rate=1e-3): + def optimizer(self, learning_rate=1e-3): return fluid.optimizer.SGD(learning_rate=learning_rate) - def test_simple_fc_with_fuse_op(self): - self._compare_fused_optimizer_ops( - simple_fc_net, True, optimizer=self.sgd_optimizer) - self._compare_fused_optimizer_ops( - simple_fc_net, False, optimizer=self.sgd_optimizer) - - def test_batchnorm_fc_with_fuse_op(self): - self._compare_fused_optimizer_ops( - fc_with_batchnorm, True, optimizer=self.sgd_optimizer) - self._compare_fused_optimizer_ops( - fc_with_batchnorm, False, optimizer=self.sgd_optimizer) - class TestFuseMomentumOps(TestFuseAdamOps): - def momentum_optimizer(self, learning_rate=1e-3): + def optimizer(self, learning_rate=1e-3): 
return fluid.optimizer.Momentum( learning_rate=learning_rate, momentum=0.1) - def test_simple_fc_with_fuse_op(self): - self._compare_fused_optimizer_ops( - simple_fc_net, True, optimizer=self.momentum_optimizer) - self._compare_fused_optimizer_ops( - simple_fc_net, False, optimizer=self.momentum_optimizer) - def test_batchnorm_fc_with_fuse_op(self): - self._compare_fused_optimizer_ops( - fc_with_batchnorm, True, optimizer=self.momentum_optimizer) +class TestSpareFuseAdamOps(TestFuseOptimizationOps): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + cls.word_dict_len = 5147 + batch_size = 64 + reader = fake_imdb_reader(cls.word_dict_len, batch_size * 100) + reader = paddle.batch(reader, batch_size=batch_size)() + cls.train_data = next(reader) + + def _get_data_from_feeder(self): + place = fluid.CPUPlace() + feeder = fluid.DataFeeder(feed_list=["words", "label"], place=place) + return feeder.feed(self.train_data) + + def _decorate_compare_fused_optimizer_ops(self, model, use_cuda, optimizer): self._compare_fused_optimizer_ops( - fc_with_batchnorm, False, optimizer=self.momentum_optimizer) + model, + use_cuda, + get_data_from_feeder=self._get_data_from_feeder, + optimizer=optimizer) + + def optimizer(self, learning_rate=1e-4): + return fluid.optimizer.Adam(learning_rate=learning_rate) + + def test_simple_bow_net_with_fuse_op(self): + model = partial(bow_net, dict_dim=self.word_dict_len, is_sparse=True) + self._decorate_compare_fused_optimizer_ops( + model, True, optimizer=self.optimizer) + self._decorate_compare_fused_optimizer_ops( + model, False, optimizer=self.optimizer) + + +class TestSpareFuseSGDOps(TestSpareFuseAdamOps): + def optimizer(self, learning_rate=1e-3): + return fluid.optimizer.SGD(learning_rate=learning_rate) + + +class TestSpareFuseMomentumOps(TestSpareFuseAdamOps): + def optimizer(self, learning_rate=1e-3): + return fluid.optimizer.Momentum( + learning_rate=learning_rate, momentum=0.1) if __name__ == '__main__': -- GitLab
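
For readers following the rename, the layout that coalesce_tensor builds (and that test_coalesce_tensor_op.py checks with alignment = 256) can be sketched in plain NumPy. This is an illustrative model only, not Paddle code: aligned_numel and coalesce are hypothetical helpers that mirror the platform::Alignment rule added above (round each tensor's byte size up to the device minimum chunk size) and the copy_data=True path of the op.

    import numpy as np

    def aligned_numel(numel, size_of_dtype, alignment):
        # Mirror platform::Alignment: round the byte size up to a multiple of
        # `alignment`, then convert back to an element count.
        size = numel * size_of_dtype
        remaining = size % alignment
        if remaining != 0:
            size += alignment - remaining
        return size // size_of_dtype

    def coalesce(tensors, alignment=256, dtype=np.float32):
        # Sketch of coalesce_tensor with copy_data=True: allocate one fused
        # buffer, copy every input to an aligned offset inside it, and hand
        # back each "output" as a slice of that buffer, so their addresses
        # are contiguous.
        size_of_dtype = np.dtype(dtype).itemsize
        total = sum(aligned_numel(t.size, size_of_dtype, alignment) for t in tensors)
        fused = np.zeros(total, dtype=dtype)
        outputs, offset = [], 0
        for t in tensors:
            flat = t.astype(dtype).ravel()
            fused[offset:offset + flat.size] = flat
            outputs.append(fused[offset:offset + flat.size].reshape(t.shape))
            offset += aligned_numel(t.size, size_of_dtype, alignment)
        return fused, outputs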
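
A minimal sketch of how the two switches exercised by the new tests are turned on from user code, assuming the fluid 1.x BuildStrategy/CompiledProgram API; the toy network below stands in for any model (for example bow_net with is_sparse=True, as in the sparse-gradient tests) and the snippet is not part of this patch.

    import paddle.fluid as fluid

    # A toy model; any network (e.g. bow_net above) would do.
    image = fluid.layers.data(name='image', shape=[784], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    prediction = fluid.layers.fc(input=image, size=10, act='softmax')
    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))
    fluid.optimizer.SGD(learning_rate=1e-3).minimize(loss)

    build_strategy = fluid.BuildStrategy()
    # Same flag the fuse-optimizer tests pass as fuse_all_optimizer_ops=True.
    build_strategy.fuse_all_optimizer_ops = True
    # Same flag the fuse-all-reduce tests pass as fuse_all_reduce_ops=True.
    build_strategy.fuse_all_reduce_ops = True

    compiled_prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(
        loss_name=loss.name, build_strategy=build_strategy)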