From 12a0e2ed9d3a78d817e4b85fed5cc6f651ad5a31 Mon Sep 17 00:00:00 2001
From: Xin Pan
Date: Fri, 22 Feb 2019 17:19:31 +0800
Subject: [PATCH] polish codes test=develop

---
 paddle/fluid/framework/details/all_reduce_deps_pass.cc      | 4 ++--
 paddle/fluid/framework/details/memory_optimize_helper.cc    | 6 +++---
 paddle/fluid/framework/details/memory_optimize_pass.cc      | 3 ++-
 paddle/fluid/framework/details/sequential_execution_pass.cc | 4 ++--
 paddle/fluid/framework/ir/graph.cc                          | 2 +-
 paddle/fluid/framework/ir/graph.h                           | 2 +-
 python/paddle/fluid/framework.py                            | 3 +--
 7 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.cc b/paddle/fluid/framework/details/all_reduce_deps_pass.cc
index 87d3b1042..ff223e616 100644
--- a/paddle/fluid/framework/details/all_reduce_deps_pass.cc
+++ b/paddle/fluid/framework/details/all_reduce_deps_pass.cc
@@ -50,7 +50,7 @@ std::unique_ptr<ir::Graph> AllReduceDepsPass::ApplyImpl(
   std::unordered_map<std::string, int> vars;
   // TODO(gongwb): use graph topology sort to find the order of operators.
   //               Note that must assert topology sort is stable
-  auto& ops = graph->Get<const std::vector<OpDesc*>>(kAllOpDescs);
+  auto& ops = graph->Get<const std::vector<OpDesc*>>(kStaleProgramOpDescs);
   for (auto* op_desc : ops) {
     auto outputs = op_desc->Outputs();
     for (auto& o_it : outputs) {
@@ -120,4 +120,4 @@ std::unique_ptr<ir::Graph> AllReduceDepsPass::ApplyImpl(
 
 REGISTER_PASS(all_reduce_deps_pass,
               paddle::framework::details::AllReduceDepsPass)
-    .RequireGraphAttr(paddle::framework::details::kAllOpDescs);
+    .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs);
diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc
index db4e805bb..083b6b9d8 100644
--- a/paddle/fluid/framework/details/memory_optimize_helper.cc
+++ b/paddle/fluid/framework/details/memory_optimize_helper.cc
@@ -33,10 +33,10 @@ namespace details {
 using paddle::framework::VarDesc;
 
 std::vector<ir::Node*> SortOpLikeDescOrder(const ir::Graph& graph) {
-  PADDLE_ENFORCE(graph.Has(kAllOpDescs),
-                 "Graph has no attribute of kAllOpDescs.");
+  PADDLE_ENFORCE(graph.Has(kStaleProgramOpDescs),
+                 "Graph has no attribute of kStaleProgramOpDescs.");
   // 1. get op desc order
-  auto& op_descs = graph.Get<const std::vector<OpDesc*>>(kAllOpDescs);
+  auto& op_descs = graph.Get<const std::vector<OpDesc*>>(kStaleProgramOpDescs);
 
   // 2. topology sort order
   auto nodes = graph.Nodes();
diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/details/memory_optimize_pass.cc
index 20d486588..fd02bc469 100644
--- a/paddle/fluid/framework/details/memory_optimize_pass.cc
+++ b/paddle/fluid/framework/details/memory_optimize_pass.cc
@@ -336,4 +336,5 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var,
 }  // namespace paddle
 
 REGISTER_PASS(memory_optimize_pass,
-              paddle::framework::details::MemoryOptimizePass);
+              paddle::framework::details::MemoryOptimizePass)
+    .RequireGraphAttr(paddle::framework::details::kAllOpDescs);
diff --git a/paddle/fluid/framework/details/sequential_execution_pass.cc b/paddle/fluid/framework/details/sequential_execution_pass.cc
index d4e7bb658..0b53a76e7 100644
--- a/paddle/fluid/framework/details/sequential_execution_pass.cc
+++ b/paddle/fluid/framework/details/sequential_execution_pass.cc
@@ -40,7 +40,7 @@ std::unique_ptr<ir::Graph> SequentialExecutionPass::ApplyImpl(
   static std::unordered_set<std::string> skip_dist_ops{
       "send", "recv", "send_barrier", "fetch_barrier"};
 
-  auto &ops = graph->Get<const std::vector<OpDesc *>>(kAllOpDescs);
+  auto &ops = graph->Get<const std::vector<OpDesc *>>(kStaleProgramOpDescs);
   std::vector<ir::Node *> op_node_list;
   op_node_list.reserve(ops.size());
 
@@ -107,4 +107,4 @@ std::unique_ptr<ir::Graph> SequentialExecutionPass::ApplyImpl(
 
 REGISTER_PASS(sequential_execution_pass,
               paddle::framework::details::SequentialExecutionPass)
-    .RequireGraphAttr(paddle::framework::details::kAllOpDescs);
+    .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs);
diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc
index 5ea30f824..5e954fa9c 100644
--- a/paddle/fluid/framework/ir/graph.cc
+++ b/paddle/fluid/framework/ir/graph.cc
@@ -77,7 +77,7 @@ std::map<std::string, std::vector<ir::Node *>> Graph::InitFromProgram(
     }
   }
   Set<const std::vector<OpDesc *>>(
-      details::kAllOpDescs,
+      details::kStaleProgramOpDescs,
       new std::vector<OpDesc *>(program.Block(0).AllOps()));
   return var_nodes;
 }
diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h
index 296f3b839..8cb3b874d 100644
--- a/paddle/fluid/framework/ir/graph.h
+++ b/paddle/fluid/framework/ir/graph.h
@@ -31,7 +31,7 @@ namespace details {
 
 // This attr is not recommended, because the graph should not dependence
 // the program once it is built.
-constexpr char kAllOpDescs[] = "all_op_descs";
+constexpr char kStaleProgramOpDescs[] = "stale_program_op_descs";
 }  // namespace details
 
 namespace ir {
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 72f1eae95..15367c724 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -2322,7 +2322,7 @@ class Program(object):
     @staticmethod
     def _construct_from_desc(desc):
         """
-        Construct a program from program desc. (Experiment)
+        Construct a program from program desc.
 
         Args:
             desc(core.ProgramDesc): The program desc for constructing.
@@ -2332,7 +2332,6 @@ class Program(object):
         """
         p = Program()
         p.desc = desc
-        # TODO(wangzhen): Block.vars/ops are not filled, should fix it.
         p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]
         p._sync_with_cpp()
         return p
-- 
GitLab
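
Reviewer note, not part of the commit above: the rename only relabels the graph attribute (kAllOpDescs -> kStaleProgramOpDescs); the pattern for consuming it is unchanged. Below is a minimal C++ sketch of that pattern, mirroring AllReduceDepsPass in this diff. DemoPass/demo_pass are hypothetical placeholder names, and the includes and helper macros of the pass files above are assumed.

    // Hypothetical pass, for illustration only; follows the diff's pattern.
    namespace paddle {
    namespace framework {
    namespace details {

    class DemoPass : public ir::Pass {
     protected:
      std::unique_ptr<ir::Graph> ApplyImpl(
          std::unique_ptr<ir::Graph> graph) const override {
        // The attribute holds the OpDescs captured from the original
        // ProgramDesc when the graph was built (Graph::InitFromProgram);
        // it goes stale once passes mutate the graph, hence the new name.
        auto& op_descs =
            graph->Get<const std::vector<OpDesc*>>(kStaleProgramOpDescs);
        for (auto* op_desc : op_descs) {
          VLOG(3) << "stale program op: " << op_desc->Type();
        }
        return graph;
      }
    };

    }  // namespace details
    }  // namespace framework
    }  // namespace paddle

    REGISTER_PASS(demo_pass, paddle::framework::details::DemoPass)
        .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs);

Declaring the attribute with RequireGraphAttr makes the dependency explicit at registration time, so applying the pass to a graph that lacks kStaleProgramOpDescs fails fast rather than inside Get().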