diff --git a/python/paddle/distributed/communication/all_gather.py b/python/paddle/distributed/communication/all_gather.py
index 18f4bbab7ce08dd786bc3c59a9e11c1bd2f51c1b..791586104d559ef0495066c98ee2bc9013b7c751 100644
--- a/python/paddle/distributed/communication/all_gather.py
+++ b/python/paddle/distributed/communication/all_gather.py
@@ -16,7 +16,7 @@ import numpy as np
 
 import paddle
 import paddle.distributed.communication.stream as stream
-import paddle.fluid.framework as framework
+import paddle.framework as framework
 
 from .serialization_utils import (
     convert_object_to_tensor,
diff --git a/python/paddle/distributed/communication/batch_isend_irecv.py b/python/paddle/distributed/communication/batch_isend_irecv.py
index a85fdcbacbf94ac89aefa9733178173795345815..1dab6f533bd2ea4a00f6bea3df2ee5394d406c6a 100644
--- a/python/paddle/distributed/communication/batch_isend_irecv.py
+++ b/python/paddle/distributed/communication/batch_isend_irecv.py
@@ -15,8 +15,7 @@
 import contextlib
 
 import paddle.distributed as dist
-import paddle.fluid.core as core
-import paddle.fluid.framework as framework
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _warn_cur_rank_not_in_group,
@@ -79,12 +78,12 @@
 @contextlib.contextmanager
 def _with_batch_p2p_guard(backend):
     if backend == "NCCL":
-        core.ProcessGroupNCCL.group_start()
+        framework.core.ProcessGroupNCCL.group_start()
     try:
         yield
     finally:
         if backend == "NCCL":
-            core.ProcessGroupNCCL.group_end()
+            framework.core.ProcessGroupNCCL.group_end()
 
 
 def _check_p2p_op_list(p2p_op_list):
diff --git a/python/paddle/distributed/communication/broadcast.py b/python/paddle/distributed/communication/broadcast.py
index fd6c2219c8b25a2258032c927276625e4d82c6cf..2e5dde826be0a5e034e29af0be61a3095b21b205 100644
--- a/python/paddle/distributed/communication/broadcast.py
+++ b/python/paddle/distributed/communication/broadcast.py
@@ -15,7 +15,7 @@
 import paddle
 import paddle.distributed as dist
 import paddle.distributed.communication.stream as stream
-import paddle.fluid.framework as framework
+import paddle.framework as framework
 
 from .serialization_utils import (
     convert_object_to_tensor,
diff --git a/python/paddle/distributed/communication/group.py b/python/paddle/distributed/communication/group.py
index f0236a2bdbb39f38c4502fa7e8d8031d7eed6deb..70f6c936545b57960b4288b05f3983fd605273b2 100644
--- a/python/paddle/distributed/communication/group.py
+++ b/python/paddle/distributed/communication/group.py
@@ -16,9 +16,7 @@ import warnings
 
 import paddle
 import paddle.distributed as dist
-import paddle.fluid.core as core
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 
 
 class Group:
@@ -239,7 +237,7 @@ def _sync_calc_stream(tensor):
         return paddle._legacy_C_ops.c_sync_calc_stream(tensor, tensor)
     else:
         op_type = 'c_sync_calc_stream'
-        helper = layer_helper.LayerHelper(op_type, **locals())
+        helper = framework.LayerHelper(op_type, **locals())
         helper.append_op(
             type=op_type,
             inputs={'X': [tensor]},
@@ -254,7 +252,7 @@ def _sync_comm_stream(tensor, ring_id=0):
         )
     else:
         op_type = 'c_sync_comm_stream'
-        helper = layer_helper.LayerHelper(op_type, **locals())
+        helper = framework.LayerHelper(op_type, **locals())
         helper.append_op(
             type=op_type,
             inputs={'X': [tensor]},
@@ -325,7 +323,7 @@ def barrier(group=None):
     if framework.in_dygraph_mode():
         group = _get_global_group() if group is None else group
         place = framework._current_expected_place()
-        if isinstance(place, core.CPUPlace):
+        if isinstance(place, framework.CPUPlace):
             task = group.process_group.barrier()
         else:
             device_id = place.get_device_id()
@@ -344,7 +342,7 @@ def barrier(group=None):
     op_type = 'barrier'
     if not isinstance(ring_id, int):
         raise ValueError("The type of 'group' for barrier must be int.")
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     helper.append_op(
         type=op_type,
         inputs={'X': [barrier_tensor]},
diff --git a/python/paddle/distributed/communication/reduce.py b/python/paddle/distributed/communication/reduce.py
index 973e003e0604e6612ce80e8d5860c539d09b6df5..9d4f9548d3ed8e119a064f3a4d8b07dd3d3fb0e0 100644
--- a/python/paddle/distributed/communication/reduce.py
+++ b/python/paddle/distributed/communication/reduce.py
@@ -14,8 +14,7 @@
 
 import paddle
 import paddle.distributed.communication.stream as stream
-import paddle.fluid.core as core
-import paddle.fluid.framework as framework
+import paddle.framework as framework
 
 
 class ReduceOp:
@@ -59,13 +58,13 @@
 def _get_reduce_op(reduce_op, func_name):
     if framework.in_dygraph_mode():
         if reduce_op == ReduceOp.SUM:
-            return core.ReduceOp.SUM
+            return framework.core.ReduceOp.SUM
         elif reduce_op == ReduceOp.MAX:
-            return core.ReduceOp.MAX
+            return framework.core.ReduceOp.MAX
         elif reduce_op == ReduceOp.MIN:
-            return core.ReduceOp.MIN
+            return framework.core.ReduceOp.MIN
         elif reduce_op == ReduceOp.PROD:
-            return core.ReduceOp.PRODUCT
+            return framework.core.ReduceOp.PRODUCT
     else:
         if reduce_op == ReduceOp.SUM:
             return 'c_{}_sum'.format(func_name)
diff --git a/python/paddle/distributed/communication/scatter.py b/python/paddle/distributed/communication/scatter.py
index ee5886c414d61d2340d9724916c87cb013a664f8..455bb5d1cf78a652f8b5cc38526e16d562864008 100644
--- a/python/paddle/distributed/communication/scatter.py
+++ b/python/paddle/distributed/communication/scatter.py
@@ -17,7 +17,7 @@ import numpy as np
 import paddle
 import paddle.distributed as dist
 import paddle.distributed.communication.stream as stream
-import paddle.fluid.framework as framework
+import paddle.framework as framework
 
 from .serialization_utils import (
     convert_object_to_tensor,
diff --git a/python/paddle/distributed/communication/stream/all_gather.py b/python/paddle/distributed/communication/stream/all_gather.py
index 779a3c8f64cf7138b61cf50ad59abf920892d2a3..f5d21a35daac1a10adad9071e5ac7a504ff857ef 100644
--- a/python/paddle/distributed/communication/stream/all_gather.py
+++ b/python/paddle/distributed/communication/stream/all_gather.py
@@ -15,8 +15,7 @@
 import paddle
 import paddle.distributed as dist
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import _get_global_group
 
 
@@ -62,7 +61,7 @@ def _all_gather_in_dygraph(
 
 def _all_gather_in_static_mode(tensor_list, tensor, group, sync_op):
     op_type = 'c_allgather'
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     out = helper.create_variable_for_type_inference(dtype=tensor.dtype)
     for elem in tensor_list:
         data_feeder.check_variable_and_dtype(
diff --git a/python/paddle/distributed/communication/stream/all_reduce.py b/python/paddle/distributed/communication/stream/all_reduce.py
index 412085b1b1720ab84f2a7378b5ae49441aed4b55..786e4284c755645fdedf0e4b722fbe0e2734a07e 100644
--- a/python/paddle/distributed/communication/stream/all_reduce.py
+++ b/python/paddle/distributed/communication/stream/all_reduce.py
@@ -13,8 +13,7 @@
 # limitations under the License.
 
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _warn_cur_rank_not_in_group,
@@ -60,7 +59,7 @@ def _all_reduce_in_static_mode(tensor, op, group, sync_op, use_calc_stream):
 
     # TODO: Support task and use task.wait in static graph mode
     # Use use_calc_stream rather than sync_op
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     helper.append_op(
         type=op_type,
         inputs={'X': [tensor]},
diff --git a/python/paddle/distributed/communication/stream/all_to_all.py b/python/paddle/distributed/communication/stream/all_to_all.py
index d64ccb742ef08cac09217bffd2b2f59b8d185ceb..4a804cf4ababf15559d97300d9b6f778f282bf83 100644
--- a/python/paddle/distributed/communication/stream/all_to_all.py
+++ b/python/paddle/distributed/communication/stream/all_to_all.py
@@ -15,8 +15,7 @@
 import paddle
 import paddle.distributed as dist
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _warn_cur_rank_not_in_group,
@@ -73,7 +72,7 @@
     op_type = 'alltoall'
     ring_id = 0 if group is None else group.id
     nranks = dist.get_world_size()
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
 
     in_tensor = in_tensor_or_tensor_list
     if isinstance(in_tensor_or_tensor_list, list):
diff --git a/python/paddle/distributed/communication/stream/broadcast.py b/python/paddle/distributed/communication/stream/broadcast.py
index cb6fbc75d152807c6586fff93eb4321fb1be1f07..c7f6fc3203de34f8981db89e277ec6fad1f70041 100644
--- a/python/paddle/distributed/communication/stream/broadcast.py
+++ b/python/paddle/distributed/communication/stream/broadcast.py
@@ -13,8 +13,7 @@
 # limitations under the License.
 
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _get_or_throw_group_rank,
@@ -57,7 +56,7 @@ def _broadcast_in_static_mode(
     )
 
     op_type = 'c_broadcast'
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     ring_id = 0 if group is None else group.id
 
     helper.append_op(
diff --git a/python/paddle/distributed/communication/stream/recv.py b/python/paddle/distributed/communication/stream/recv.py
index fcd007e6d333dba5d3bdaa7d9574c6a60b53a850..7b623d67203c2838eb362baf53c18cc63d45ca8e 100644
--- a/python/paddle/distributed/communication/stream/recv.py
+++ b/python/paddle/distributed/communication/stream/recv.py
@@ -13,8 +13,7 @@
 # limitations under the License.
 
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _get_or_throw_group_rank,
@@ -48,7 +47,7 @@ def _recv_in_static_mode(
         'recv',
     )
     ring_id = 0 if group is None else group.id
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     helper.append_op(
         type=op_type,
         outputs={'Out': [tensor]},
diff --git a/python/paddle/distributed/communication/stream/reduce.py b/python/paddle/distributed/communication/stream/reduce.py
index 8bd81bd586a98b1d2bece7375acb10e9976278d3..e81ff4de9c047d72db28ef934603f9c828fe950c 100644
--- a/python/paddle/distributed/communication/stream/reduce.py
+++ b/python/paddle/distributed/communication/stream/reduce.py
@@ -13,8 +13,7 @@
 # limitations under the License.
 
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _get_or_throw_group_rank,
@@ -63,7 +62,7 @@ def _reduce_in_static_mode(
 
     op_type = _get_reduce_op(op, "reduce")
     ring_id = 0 if group is None else group.id
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     helper.append_op(
         type=op_type,
         inputs={'X': [tensor]},
diff --git a/python/paddle/distributed/communication/stream/reduce_scatter.py b/python/paddle/distributed/communication/stream/reduce_scatter.py
index 3442365863002efae8008317dfadad3ae7f3ce66..b0776246d0af98735e4e90e2713899e0632afc43 100644
--- a/python/paddle/distributed/communication/stream/reduce_scatter.py
+++ b/python/paddle/distributed/communication/stream/reduce_scatter.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import paddle
-import paddle.fluid.framework as framework
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _warn_cur_rank_not_in_group,
diff --git a/python/paddle/distributed/communication/stream/scatter.py b/python/paddle/distributed/communication/stream/scatter.py
index 6f332fbbd6fb3301b5c010fc0e899896239bcb44..db2010e4116dc88d9344eb4bd61c753e34e08ab3 100644
--- a/python/paddle/distributed/communication/stream/scatter.py
+++ b/python/paddle/distributed/communication/stream/scatter.py
@@ -17,8 +17,7 @@ import warnings
 import paddle
 import paddle.distributed as dist
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _get_or_throw_group_rank,
@@ -113,7 +112,7 @@ def _scatter_in_static_mode(
     )
 
     op_type = 'c_scatter'
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     helper.append_op(
         type=op_type,
         inputs={'X': [input_tensor]},
diff --git a/python/paddle/distributed/communication/stream/send.py b/python/paddle/distributed/communication/stream/send.py
index e18a9a5738482f10ac424cdce7138cf621b32aed..30a965e82154ce72900b511e97371d836aca14fc 100644
--- a/python/paddle/distributed/communication/stream/send.py
+++ b/python/paddle/distributed/communication/stream/send.py
@@ -13,8 +13,7 @@
 # limitations under the License.
 
 import paddle.fluid.data_feeder as data_feeder
-import paddle.fluid.framework as framework
-import paddle.fluid.layer_helper as layer_helper
+import paddle.framework as framework
 from paddle.distributed.communication.group import (
     _get_global_group,
     _get_or_throw_group_rank,
@@ -49,7 +48,7 @@ def _send_in_static_mode(
     )
     ring_id = 0 if group is None else group.id
 
-    helper = layer_helper.LayerHelper(op_type, **locals())
+    helper = framework.LayerHelper(op_type, **locals())
     helper.append_op(
         type=op_type,
         inputs={'X': [tensor]},
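
Reviewer note: the patch above is a mechanical import migration (paddle.fluid.core, paddle.fluid.framework, and paddle.fluid.layer_helper are replaced by paddle.framework). The snippet below is a minimal sanity-check sketch, not part of the patch; it only assumes that paddle.framework re-exports the attributes that the new call sites actually reference (core, LayerHelper, in_dygraph_mode, CPUPlace).

    # Hedged sanity check for the migration: verify that paddle.framework
    # exposes the symbols used by the rewritten call sites in this diff.
    import paddle.framework as framework

    # framework.core.* replaces paddle.fluid.core.* (ReduceOp, ProcessGroupNCCL, ...)
    assert hasattr(framework, "core")
    # framework.LayerHelper replaces paddle.fluid.layer_helper.LayerHelper
    assert hasattr(framework, "LayerHelper")
    # dygraph/static dispatch and place handling used by group.py barrier()
    assert hasattr(framework, "in_dygraph_mode")
    assert hasattr(framework, "CPUPlace")
    print("paddle.framework exposes every symbol referenced by this change")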