From e26cced7ccad46c3165b9c8dc2ee8831c0f5aa8d Mon Sep 17 00:00:00 2001
From: Wu Yi
Date: Thu, 27 Dec 2018 18:51:01 +0800
Subject: [PATCH] refine batch merge pass (#14777)

* refine batch merge pass

* refine batch merge pass

test=develop
---
 .../framework/ir/multi_batch_merge_pass.cc   | 29 ++++++++++++++++---
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc
index bd5b76426..9e77f98e9 100644
--- a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc
+++ b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc
@@ -75,6 +75,7 @@ std::unique_ptr<ir::Graph> BatchMergePass::ApplyImpl(
   std::vector<ir::Node*> optimize_ops;
   std::vector<ir::Node*> lr_ops;  // ops other than forward/backward/optimize
   std::unordered_set<std::string> grad_names;
+  std::unordered_map<std::string, std::string> gradname2paramname;
 
   std::vector<ir::Node*> nodes = TopologySortOperations(*graph);
   auto origin_nodes = graph->ReleaseNodes();
@@ -99,6 +100,7 @@ std::unique_ptr<ir::Graph> BatchMergePass::ApplyImpl(
       auto op_role_vars = boost::get<std::vector<std::string>>(op_role_var);
       for (size_t i = 0; i < op_role_vars.size(); i += 2) {
         grad_names.insert(op_role_vars[i + 1]);
+        gradname2paramname[op_role_vars[i + 1]] = op_role_vars[i];
       }
     } else if (op_role & static_cast<int>(framework::OpRole::kLRSched)) {
       lr_ops.push_back(node);
@@ -109,7 +111,7 @@ std::unique_ptr<ir::Graph> BatchMergePass::ApplyImpl(
 
   // 2. copy forward backward
   ir::Node* prev_repeat_last_op_node = nullptr;
-  // record origin_grad -> repeated grad list map.
+  // record origin_grad -> repeated_grad_list map.
   std::map<ir::Node*, std::vector<ir::Node*>> grad_repeated_map;
   std::map<std::string, std::vector<ir::Node*>> created;
   std::unordered_set<std::string> bn_vars_need_rename;
@@ -124,10 +126,16 @@ std::unique_ptr<ir::Graph> BatchMergePass::ApplyImpl(
         if (grad_names.find(outname) != grad_names.end()) {
           std::string new_gname = string::Sprintf("%s.repeat.%d", outname, i);
           repeated_op.RenameOutput(outname, new_gname);
+          // remove op_role_var for backward ops that output the grad of a
+          // parameter.
+          repeated_op.SetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName(),
+                              std::vector<std::string>());
         }
       }
       // 3.5 let batch_norm ops use independent vars, note batch_norm_grad do
-      // not need this update
+      // not need this update, because only the moving mean and variance should
+      // differ; the trainable scale and bias parameters are the same as other
+      // parameters.
       if (node->Name() == "batch_norm") {
         // NOTE: assume bn op created by layers use save var as output mean and
         // variance
@@ -224,16 +232,25 @@ std::unique_ptr<ir::Graph> BatchMergePass::ApplyImpl(
         var->inputs.push_back(repeated_node);
       }
     }
-  }
+  }  // end copy forward backward
 
-  // 5. create GRAD merge op node
+  // 5. create GRAD merge op node: sum(repeat.0...repeat.n) ->
+  // scale(1/num_repeats)
   for (auto kv : grad_repeated_map) {
     OpDesc sum_op;
     sum_op.SetType("sum");
     std::vector<std::string> repeated_grad_names;
+    std::vector<std::string> param_grad_op_role_var;
     for (auto r : kv.second) {
       repeated_grad_names.push_back(r->Var()->Name());
     }
+    // NOTE: use op_role_var to control allreduce op appending in
+    //       multi_devices_graph_pass; we want to append op_role_var only once
+    //       for the merged gradient, so it is set only on the scale op below.
+    param_grad_op_role_var.push_back(
+        gradname2paramname.at(kv.first->Var()->Name()));  // param
+    param_grad_op_role_var.push_back(kv.first->Var()->Name());  // grad
+
     sum_op.SetInput("X", repeated_grad_names);
     sum_op.SetOutput("Out", {kv.first->Var()->Name()});
     sum_op.SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
@@ -256,6 +273,10 @@ std::unique_ptr<ir::Graph> BatchMergePass::ApplyImpl(
     scale_op.SetAttr("scale", static_cast<float>(1.0f / num_repeats));
     scale_op.SetAttr(OpProtoAndCheckerMaker::OpRoleAttrName(),
                      static_cast<int>(OpRole::kBackward));
+
+    scale_op.SetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName(),
+                     param_grad_op_role_var);
+
     auto scale_op_node = result.CreateOpNode(&scale_op);
     scale_op_node->inputs.push_back(sum_out_var_node);
     sum_out_var_node->outputs.push_back(scale_op_node);
-- 
GitLab
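The merged-gradient step this patch finishes can be summarized as: sum the per-repeat gradients grad.repeat.0..n, scale the sum by 1/num_repeats, and attach the (param, grad) pair recorded in gradname2paramname exactly once, on the final scale op, so that multi_devices_graph_pass appends a single allreduce per merged gradient. The snippet below is a minimal, framework-free sketch of that arithmetic; Tensor, MergeRepeatedGrads, and the fc_0.w_0 variable names are assumptions for illustration only and are not Paddle's IR API.

// Minimal sketch (assumed names, not Paddle's IR API): merge repeated
// gradients by summing them and scaling by 1/num_repeats, and record the
// (param, grad) pair once for the merged result.
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Tensor = std::vector<float>;

// Element-wise sum of the per-repeat gradients, then scale by 1/num_repeats.
Tensor MergeRepeatedGrads(const std::vector<Tensor>& repeated_grads) {
  Tensor merged(repeated_grads.front().size(), 0.0f);
  for (const Tensor& g : repeated_grads) {
    for (std::size_t i = 0; i < merged.size(); ++i) merged[i] += g[i];
  }
  const float scale = 1.0f / static_cast<float>(repeated_grads.size());
  for (float& v : merged) v *= scale;
  return merged;
}

int main() {
  // grad name -> param name, analogous to gradname2paramname in the pass.
  const std::map<std::string, std::string> gradname2paramname = {
      {"fc_0.w_0@GRAD", "fc_0.w_0"}};

  // Gradients produced by three copies of the forward/backward sub-graph.
  const std::vector<Tensor> repeated = {{1.f, 2.f}, {3.f, 4.f}, {5.f, 6.f}};
  const Tensor merged = MergeRepeatedGrads(repeated);  // {3, 4}

  // The (param, grad) pair is attached only to the merged gradient, mirroring
  // the single op_role_var set on the final scale op in the patch.
  const std::pair<std::string, std::string> op_role_var = {
      gradname2paramname.at("fc_0.w_0@GRAD"), "fc_0.w_0@GRAD"};

  std::cout << op_role_var.first << " <- " << op_role_var.second << ":";
  for (float v : merged) std::cout << ' ' << v;
  std::cout << '\n';
  return 0;
}

Summing first and scaling once keeps the merged gradient numerically equivalent to averaging the repeats, while giving the pass a single op on which to hang the op_role_var attribute.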