diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
index 3c0a8d7020a570ae06735916b922d8d4afaa4b46..7d1e63f3682bca8965f6c5e695132dff44fa3715 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
@@ -36,6 +36,11 @@ namespace framework {
 namespace details {
 
 namespace {
+// TODO(panyx0718): Clean this up as well.
+// all operators. NOTE that even we use a vector here, the operators is
+// unordered.
+typedef std::vector<OpHandleBase *> GraphOps;
+const char kGraphOps[] = "ops";
 bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) {
   return boost::get<int>(
@@ -221,6 +226,7 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilderBase::ApplyImpl(
    * Only variables should be the leaves of graph.
    */
   AddOutputToLeafOps(&result);
+  result.Erase(kGraphOps);
   return graph;
 }
diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h
index 5331b750eb429bfde1ad7cd461ec5ce6ddbb0d14..9afbb91005c9c3a9d2e185f4dfa901ebf812ee19 100644
--- a/paddle/fluid/framework/details/multi_devices_helper.h
+++ b/paddle/fluid/framework/details/multi_devices_helper.h
@@ -44,12 +44,6 @@ const char kGraphVars[] = "vars";
 typedef std::unordered_set<VarHandleBase *> GraphDepVars;
 const char kGraphDepVars[] = "dep_vars";
 
-// TODO(panyx0718): Clean this up as well.
-// all operators. NOTE that even we use a vector here, the operators is
-// unordered.
-typedef std::vector<OpHandleBase *> GraphOps;
-const char kGraphOps[] = "ops";
-
 }  // namespace details
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc
index 3740b795fa47f7551b58473031f36df57041a192..4c8f69c68ce17d0143c34e8adbab92cdc90058c8 100644
--- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc
+++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc
@@ -30,7 +30,6 @@ ParallelSSAGraphExecutor::SeparateMultiDevicesGraph(
     auto &g = graphs.back();
     g->Set(kGraphVars, new GraphVars(1UL));
     g->Set(kGraphDepVars, new GraphDepVars);
-    g->Set(kGraphOps, new GraphOps);
   }
 
   auto op_handles = ir::FilterByNodeWrapper<OpHandleBase>(*graph);
@@ -38,9 +37,7 @@ ParallelSSAGraphExecutor::SeparateMultiDevicesGraph(
     auto &dev_ctx = op->DeviceContext();
     auto &p = dev_ctx.begin()->first;
     int dev_id = boost::get<platform::CUDAPlace>(p).device;
-    auto &dev_ops = graphs[dev_id]->Get<GraphOps>(kGraphOps);
     auto &dev_dummys = graphs[dev_id]->Get<GraphDepVars>(kGraphDepVars);
-    dev_ops.emplace_back(op);
     graphs[dev_id]->AddNode(graph->RemoveNode(op->Node()).release());
 
     for (auto &var : op->Inputs()) {
diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h
index f59305bf9827cfc6032eda355d884dec825dc351..1c35d45fdd356a867d1ad80b345379395e03172e 100644
--- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.h
+++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.h
@@ -14,8 +14,6 @@
 
 #pragma once
 
-#include
-#include
 #include
 #include
diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h
index d5b3782f622a8d5addc7eb51cebf7c8fbac3a453..296f3b83961c1379ee2c1237aa15784791b46878 100644
--- a/paddle/fluid/framework/ir/graph.h
+++ b/paddle/fluid/framework/ir/graph.h
@@ -28,6 +28,9 @@ namespace paddle {
 namespace framework {
 namespace details {
+
+// This attr is not recommended, because the graph should not depend on
+// the program once it is built.
 constexpr char kAllOpDescs[] = "all_op_descs";
 }  // namespace details