diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
index 22f0cb20d01cc5b40325ec37a8c7cd44105bc6c6..2f2869b1634256c3745e733bb1b99bfe4ddf8924 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -259,7 +259,7 @@ std::unique_ptr MultiDevSSAGraphBuilder::Apply(
   result.Set("ops", new GraphOps);
 
   // find send/recv vars so that we can place the distributed training
-  // realted op in the place 0
+  // related ops on place 0
   auto send_vars = FindDistTrainSendVars(sorted_ops);
   auto recv_vars = FindDistTrainRecvVars(sorted_ops);
 
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index e7698d8c52b411fd0afe919625034107081726b5..4a9ea6af747c36e5817ede5fafbadeea79fb07ac 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -779,7 +779,9 @@ class DistributeTranspiler(object):
             outputs={"Out": prefetch_output_vars},
             attrs={
                 "epmap": pserver_endpoints,
-                RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
+                # FIXME(qiao): temporarily disable this attr because prefetch
+                # does not act like the other rpc ops; it is more like a forward op
+                # RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE
             })
 
         # insert concat_op
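
Note on the second hunk: with the RPC op-role attribute commented out, the prefetch op is appended with only its endpoint map, so downstream passes (such as the multi-device graph builder above, which uses send/recv information to pin distributed-training ops to place 0) no longer treat it as an RPC op. Below is a minimal sketch of what the call reads like after this change, assuming the fluid `Block.append_op` API; `insert_prefetch_op`, the `block` argument, the `"X"` input key, and the variable names are illustrative placeholders, not the transpiler's exact code.

```python
from paddle.fluid import core

# These two constants mirror the definitions in distribute_transpiler.py.
RPC_OP_ROLE_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleAttrName()
RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC


def insert_prefetch_op(block, prefetch_input_vars, prefetch_output_vars,
                       pserver_endpoints):
    # Unlike send/recv, the prefetch op is appended WITHOUT the RPC
    # op-role attribute, so graph passes schedule it like an ordinary
    # forward op rather than routing it down the RPC-op path.
    block.append_op(
        type="prefetch",
        inputs={"X": prefetch_input_vars},
        outputs={"Out": prefetch_output_vars},
        attrs={"epmap": pserver_endpoints})
```

If prefetch is later made to behave like the other RPC ops, restoring `RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE` in the `attrs` dict would be enough to opt it back into the RPC scheduling path, which is presumably why the line is commented out rather than deleted.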