diff --git a/python/paddle/distributed/auto_parallel/__init__.py b/python/paddle/distributed/auto_parallel/__init__.py
index 835ca68df2d1c1169b9eaca767715070ca00e013..4486b3220fa4dde5703f4cb404a858729fe00c40 100644
--- a/python/paddle/distributed/auto_parallel/__init__.py
+++ b/python/paddle/distributed/auto_parallel/__init__.py
@@ -14,7 +14,7 @@
 
 from .strategy import Strategy
 from .process_mesh import ProcessMesh
-from .engine import Engine
+from .static.engine import Engine
 from .interface import shard_tensor
 from .interface import shard_op
 from .interface import recompute
diff --git a/python/paddle/distributed/auto_parallel/dygraph/__init__.py b/python/paddle/distributed/auto_parallel/dygraph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ee2fa6eb0697258e81c48a39f7fe773712a81a4
--- /dev/null
+++ b/python/paddle/distributed/auto_parallel/dygraph/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py
index 76207bc588968c908e9042d351d4d24e00cc93eb..06a24b0c5433ee8f119d704bb181f561d385fcb7 100644
--- a/python/paddle/distributed/auto_parallel/interface.py
+++ b/python/paddle/distributed/auto_parallel/interface.py
@@ -14,11 +14,11 @@
 
 import paddle
 
-from .dist_context import get_default_distributed_context
-from .dist_op import DistributedOperatorHelper
-from .dist_tensor import DistributedTensor
 from .process_mesh import ProcessMesh, get_current_process_mesh
-from .utils import (
+from .static.dist_context import get_default_distributed_context
+from .static.dist_op import DistributedOperatorHelper
+from .static.dist_tensor import DistributedTensor
+from .static.utils import (
     __no_shape_var_type__,
     convert_to_dims_mapping,
     verify_shard_spec,
diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py
index e2ccd16aaaad4bd150a09cf65995ecfbf2c6da9e..1c2f292e5f861497354ae4603cd7562c82913087 100644
--- a/python/paddle/distributed/auto_parallel/process_mesh.py
+++ b/python/paddle/distributed/auto_parallel/process_mesh.py
@@ -140,12 +140,12 @@ class ProcessMesh(core.ProcessMesh):
         )
 
         # Store all process meshes
-        from .dist_context import get_default_distributed_context
+        from .static.dist_context import get_default_distributed_context
 
         default_dist_cxt = get_default_distributed_context()
         default_dist_cxt.add_process_mesh(self)
         # Add new processes to process group 0
-        from .process_group import get_process_group
+        from .static.process_group import get_process_group
 
         pg0 = get_process_group(0)
         pg0.add_ranks(self.process_ids)
@@ -204,14 +204,14 @@ class ProcessMesh(core.ProcessMesh):
         self._old_op_size = len(cur_block.ops)
 
     def __exit__(self, exc_type, exc_value, exc_traceback):
-        from .dist_op import DistributedOperator
-        from .dist_tensor import DistributedTensor
+        from .static.dist_op import DistributedOperator
+        from .static.dist_tensor import DistributedTensor
 
         default_prog = paddle.static.default_main_program()
         cur_block = default_prog.current_block()
         new_var_names = list(cur_block.vars.keys())
         new_op_size = len(cur_block.ops)
-        from .dist_context import get_default_distributed_context
+        from .static.dist_context import get_default_distributed_context
 
         default_dist_ctx = get_default_distributed_context()
         for name in new_var_names:
diff --git a/python/paddle/distributed/auto_parallel/random.py b/python/paddle/distributed/auto_parallel/random.py
index 5ca6d9e9ea06961e67b82b6962f241dfcc8ce64e..d238fd60232d07996b3f60a6e61a3f8a6b7db495 100644
--- a/python/paddle/distributed/auto_parallel/random.py
+++ b/python/paddle/distributed/auto_parallel/random.py
@@ -17,7 +17,7 @@ import paddle
 
 from ..utils.log_utils import get_logger
 from .process_mesh import retrive_unique_id_for_process_mesh
-from .utils import _get_idx_in_axis
+from .static.utils import _get_idx_in_axis
 
 _logger = get_logger(logging.INFO)
 
diff --git a/python/paddle/distributed/auto_parallel/static/__init__.py b/python/paddle/distributed/auto_parallel/static/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f0ea85344b7e0c679730356928c8749cf71cd66
--- /dev/null
+++ b/python/paddle/distributed/auto_parallel/static/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
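Note on the first hunk above: because paddle/distributed/auto_parallel/__init__.py re-exports Engine from its new static.engine home, the public import path is unchanged by this refactor. A minimal sketch of that invariant (assuming a Paddle build that already includes this change):

    # Both the public alias and the new internal location should resolve
    # to the same class once this diff is applied (assumption: a build
    # containing this refactor).
    from paddle.distributed.auto_parallel import Engine
    from paddle.distributed.auto_parallel.static.engine import Engine as StaticEngine

    assert Engine is StaticEngine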
diff --git a/python/paddle/distributed/auto_parallel/auto_align_tool.py b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/auto_align_tool.py
rename to python/paddle/distributed/auto_parallel/static/auto_align_tool.py
index 76a8db09fdcd940e509375df4e61ac05ef12a127..2cd9e4a05d9193af9b4eacc22d8fe877cd5b9047 100644
--- a/python/paddle/distributed/auto_parallel/auto_align_tool.py
+++ b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py
@@ -21,11 +21,11 @@ import numpy as np
 
 import paddle
 import paddle.distributed as dist
-from paddle.distributed.auto_parallel.converter import Converter
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.converter import Converter
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     is_backward_op,
     is_forward_op,
     is_loss_op,
diff --git a/python/paddle/distributed/auto_parallel/callbacks.py b/python/paddle/distributed/auto_parallel/static/callbacks.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/callbacks.py
rename to python/paddle/distributed/auto_parallel/static/callbacks.py
index db7f460b0f0521ab8e8949d651661696948ff07e..6cbfaceee34802465075a587e01aa8e1e079e2f8 100644
--- a/python/paddle/distributed/auto_parallel/callbacks.py
+++ b/python/paddle/distributed/auto_parallel/static/callbacks.py
@@ -24,7 +24,7 @@ from paddle.hapi.callbacks import (
     ProgBarLogger,
 )
 
-from .interface import CollectionNames, get_collection
+from ..interface import CollectionNames, get_collection
 
 
 def config_callbacks(
diff --git a/python/paddle/distributed/auto_parallel/cluster.py b/python/paddle/distributed/auto_parallel/static/cluster.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/cluster.py
rename to python/paddle/distributed/auto_parallel/static/cluster.py
index 937404369700ac9d5ce5fe695cb8e1609722a9a1..c5df57be2bf3f6bc99e7154354f39872fc14dfc7 100644
--- a/python/paddle/distributed/auto_parallel/cluster.py
+++ b/python/paddle/distributed/auto_parallel/static/cluster.py
@@ -20,7 +20,7 @@ from enum import IntEnum, unique
 
 import paddle
 
-from ..utils.log_utils import get_logger
+from ...utils.log_utils import get_logger
 
 
 @unique
diff --git a/python/paddle/distributed/auto_parallel/cluster_v2.py b/python/paddle/distributed/auto_parallel/static/cluster_v2.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cluster_v2.py
rename to python/paddle/distributed/auto_parallel/static/cluster_v2.py
diff --git a/python/paddle/distributed/auto_parallel/completion.py b/python/paddle/distributed/auto_parallel/static/completion.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/completion.py
rename to python/paddle/distributed/auto_parallel/static/completion.py
index 5f2ab7e102b0b2fca29c08bf3e83518b0b259205..cd505be0289260a7bcfc34e29774e47faed888d7 100644
--- a/python/paddle/distributed/auto_parallel/completion.py
+++ b/python/paddle/distributed/auto_parallel/static/completion.py
@@ -18,11 +18,11 @@ import logging
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 from paddle.framework import core
 
+from ..process_mesh import ProcessMesh, compute_compatible_process_mesh
 from .dist_attribute import OperatorDistAttr, TensorDistAttr
 from .dist_context import _node_id
 from .operators import find_compatible_distributed_operator_impls
 from .process_group import get_world_process_group
-from .process_mesh import ProcessMesh, compute_compatible_process_mesh
 from .utils import (
     __no_shape_var_type__,
     get_logger,
@@ -1641,7 +1641,7 @@ class Completer:
         """Complete the annotation of vars and ops in the update phase for parallel program."""
 
         # Copy the dist tensors and dist ops annotated by users from the default context
         # global mesh
-        from paddle.distributed.auto_parallel.process_group import (
+        from paddle.distributed.auto_parallel.static.process_group import (
             get_world_process_group,
         )
@@ -1895,7 +1895,7 @@ class Completer:
     def _init_global_mesh_for_program(self):
         # Copy the dist tensors and dist ops annotated by users from the default context
         # global mesh
-        from paddle.distributed.auto_parallel.process_group import (
+        from paddle.distributed.auto_parallel.static.process_group import (
            get_world_process_group,
        )
 
diff --git a/python/paddle/distributed/auto_parallel/converter.py b/python/paddle/distributed/auto_parallel/static/converter.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/converter.py
rename to python/paddle/distributed/auto_parallel/static/converter.py
index 65df19ad69c1743f2dda93e67d56dd3f845851ee..68f571857d7cf38effcf838cd245546b9238e08f 100644
--- a/python/paddle/distributed/auto_parallel/converter.py
+++ b/python/paddle/distributed/auto_parallel/static/converter.py
@@ -19,7 +19,7 @@ import numpy as np
 
 import paddle
 
-from ..utils.log_utils import get_logger
+from ...utils.log_utils import get_logger
 
 
 class Converter:
diff --git a/python/paddle/distributed/auto_parallel/cost/__init__.py b/python/paddle/distributed/auto_parallel/static/cost/__init__.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cost/__init__.py
rename to python/paddle/distributed/auto_parallel/static/cost/__init__.py
diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/static/cost/base_cost.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cost/base_cost.py
rename to python/paddle/distributed/auto_parallel/static/cost/base_cost.py
diff --git a/python/paddle/distributed/auto_parallel/cost/comm_op_cost.py b/python/paddle/distributed/auto_parallel/static/cost/comm_op_cost.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cost/comm_op_cost.py
rename to python/paddle/distributed/auto_parallel/static/cost/comm_op_cost.py
diff --git a/python/paddle/distributed/auto_parallel/cost/comp_op_cost.py b/python/paddle/distributed/auto_parallel/static/cost/comp_op_cost.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cost/comp_op_cost.py
rename to python/paddle/distributed/auto_parallel/static/cost/comp_op_cost.py
diff --git a/python/paddle/distributed/auto_parallel/cost/estimate_cost.py b/python/paddle/distributed/auto_parallel/static/cost/estimate_cost.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cost/estimate_cost.py
rename to python/paddle/distributed/auto_parallel/static/cost/estimate_cost.py
diff --git a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py b/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py
similarity index 97%
rename from python/paddle/distributed/auto_parallel/cost/tensor_cost.py
rename to python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py
index 6567088cae978463f0c461415e04b8486d1f1304..17d3b0476081af26dc2e6ff6b47887a867fa8c6e 100644
--- a/python/paddle/distributed/auto_parallel/cost/tensor_cost.py
+++ b/python/paddle/distributed/auto_parallel/static/cost/tensor_cost.py
@@ -15,7 +15,9 @@
 from functools import reduce
 
 import paddle
-from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor
+from paddle.distributed.auto_parallel.static.dist_tensor import (
+    DistributedTensor,
+)
 from paddle.static import Variable
 
 from .base_cost import Cost
diff --git a/python/paddle/distributed/auto_parallel/cost_model.py b/python/paddle/distributed/auto_parallel/static/cost_model.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/cost_model.py
rename to python/paddle/distributed/auto_parallel/static/cost_model.py
diff --git a/python/paddle/distributed/auto_parallel/dist_attribute.py b/python/paddle/distributed/auto_parallel/static/dist_attribute.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/dist_attribute.py
rename to python/paddle/distributed/auto_parallel/static/dist_attribute.py
diff --git a/python/paddle/distributed/auto_parallel/dist_context.py b/python/paddle/distributed/auto_parallel/static/dist_context.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/dist_context.py
rename to python/paddle/distributed/auto_parallel/static/dist_context.py
index f3418f271825a416d5cac665f1713a869b7ef3f2..df774d79774c95a24ced29fcf07543cf2b3517d0 100644
--- a/python/paddle/distributed/auto_parallel/dist_context.py
+++ b/python/paddle/distributed/auto_parallel/static/dist_context.py
@@ -18,9 +18,9 @@ from collections import defaultdict
 from paddle.distributed.passes import PassContext
 from paddle.framework import IrGraph, core, set_flags
 
+from ..process_mesh import ProcessMesh
 from .dist_op import DistributedOperator
 from .dist_tensor import DistributedTensor
-from .process_mesh import ProcessMesh
 from .utils import (
     __no_shape_var_type__,
     _copy_dist_attr_to_cpp,
diff --git a/python/paddle/distributed/auto_parallel/dist_loader.py b/python/paddle/distributed/auto_parallel/static/dist_loader.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/dist_loader.py
rename to python/paddle/distributed/auto_parallel/static/dist_loader.py
diff --git a/python/paddle/distributed/auto_parallel/dist_op.py b/python/paddle/distributed/auto_parallel/static/dist_op.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/dist_op.py
rename to python/paddle/distributed/auto_parallel/static/dist_op.py
diff --git a/python/paddle/distributed/auto_parallel/dist_saver.py b/python/paddle/distributed/auto_parallel/static/dist_saver.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/dist_saver.py
rename to python/paddle/distributed/auto_parallel/static/dist_saver.py
index 9e99c58d8487b0741f8ea78ea98d9cbcee279047..26b9c32c92cb2f624b61b54dbbb56f930573ba9c 100644
--- a/python/paddle/distributed/auto_parallel/dist_saver.py
+++ b/python/paddle/distributed/auto_parallel/static/dist_saver.py
@@ -23,7 +23,7 @@ import numpy as np
 import paddle
 from paddle.framework import core
 
-from ..utils.log_utils import get_logger
+from ...utils.log_utils import get_logger
 from .process_group import _g_process_group_map
 from .utils import get_dist_attr
 
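The renames above move the static-graph internals (converter, completion, dist_context, dist_saver, the cost models) one package level down. Out-of-tree code that imported these modules by absolute path must follow the move; a hedged sketch of the before/after for one representative symbol (the caller code is hypothetical, the paths are taken from the hunks above):

    # Before this change:
    #   from paddle.distributed.auto_parallel.dist_context import (
    #       get_default_distributed_context,
    #   )
    # After this change:
    from paddle.distributed.auto_parallel.static.dist_context import (
        get_default_distributed_context,
    )

    dist_ctx = get_default_distributed_context()  # default DistributedContext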
diff --git a/python/paddle/distributed/auto_parallel/dist_tensor.py b/python/paddle/distributed/auto_parallel/static/dist_tensor.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/dist_tensor.py
rename to python/paddle/distributed/auto_parallel/static/dist_tensor.py
diff --git a/python/paddle/distributed/auto_parallel/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/engine.py
rename to python/paddle/distributed/auto_parallel/static/engine.py
index 7a979a864200a4856f47e22dfd930a75799ebead..4ab2d4a7c9ac6af2ede1d315ea4d620e79198bf9 100644
--- a/python/paddle/distributed/auto_parallel/engine.py
+++ b/python/paddle/distributed/auto_parallel/static/engine.py
@@ -22,7 +22,7 @@ import random
 import numpy as np
 
 import paddle
-import paddle.distributed.auto_parallel.utils as auto_utils
+import paddle.distributed.auto_parallel.static.utils as auto_utils
 from paddle import static, utils
 from paddle.distributed import fleet
 from paddle.fluid.executor import _to_name_str
@@ -32,7 +32,9 @@ from paddle.framework import core, in_dynamic_mode
 from paddle.metric import Metric
 from paddle.static import InputSpec, Operator, Variable, global_scope
 
-from ..utils.log_utils import get_logger
+from ...utils.log_utils import get_logger
+from ..interface import CollectionNames, fetch, get_collection
+from ..strategy import Strategy
 from .callbacks import config_callbacks
 from .cluster import Cluster, get_default_cluster
 from .converter import Converter
@@ -45,11 +47,9 @@ from .dist_loader import (
 )
 from .dist_op import DistributedOperator
 from .dist_saver import DistributedSaver
 from .helper import ProgramHelper
-from .interface import CollectionNames, fetch, get_collection
 from .parallelizer_v2 import Parallelizer
 from .planner_v2 import Planner
 from .process_group import get_all_process_groups, new_process_group
-from .strategy import Strategy
 
 class Engine:
diff --git a/python/paddle/distributed/auto_parallel/graph.py b/python/paddle/distributed/auto_parallel/static/graph.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/graph.py
rename to python/paddle/distributed/auto_parallel/static/graph.py
diff --git a/python/paddle/distributed/auto_parallel/helper.py b/python/paddle/distributed/auto_parallel/static/helper.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/helper.py
rename to python/paddle/distributed/auto_parallel/static/helper.py
diff --git a/python/paddle/distributed/auto_parallel/mapper.py b/python/paddle/distributed/auto_parallel/static/mapper.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/mapper.py
rename to python/paddle/distributed/auto_parallel/static/mapper.py
diff --git a/python/paddle/distributed/auto_parallel/operators/__init__.py b/python/paddle/distributed/auto_parallel/static/operators/__init__.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/__init__.py
rename to python/paddle/distributed/auto_parallel/static/operators/__init__.py
diff --git a/python/paddle/distributed/auto_parallel/operators/common.py b/python/paddle/distributed/auto_parallel/static/operators/common.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/common.py
rename to python/paddle/distributed/auto_parallel/static/operators/common.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_assign.py b/python/paddle/distributed/auto_parallel/static/operators/dist_assign.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_assign.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_assign.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py b/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py
index 2327793e459b3750aa847cbd67a4ebe85e346860..b397903ee784637bd5d7cc35b26b5459d09f9177 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_check_finite_and_unscale.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_check_finite_and_unscale.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License
 
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.process_group import (
     get_world_process_group,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_default.py b/python/paddle/distributed/auto_parallel/static/operators/dist_default.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_default.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_default.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_dropout.py b/python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py
similarity index 98%
rename from python/paddle/distributed/auto_parallel/operators/dist_dropout.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py
index dde852e613eff52f4de772fdc7d0b2fd24525259..a5af154f385b09cf2c6124ea85caca3930a35695 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_dropout.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_dropout.py
@@ -18,10 +18,10 @@ import paddle
 from paddle.framework import core
 from paddle.utils import unique_name
 
-from ...utils.log_utils import get_logger
+from ....utils.log_utils import get_logger
 
 _logger = get_logger(logging.INFO)
-from ..random import determinate_rng, is_enable_auto_rand_ctrl
+from ...random import determinate_rng, is_enable_auto_rand_ctrl
 from ..utils import (
     naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
     set_var_dist_attr,
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_eltwise.py b/python/paddle/distributed/auto_parallel/static/operators/dist_eltwise.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_eltwise.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_eltwise.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/operators/dist_embedding.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py
index 4f13c89bb14fd63f096012e3684277465f39091c..8e6bbae74df1825fc1146f9f5f0b9d449703601a 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_embedding.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_embedding.py
@@ -13,7 +13,7 @@
 # limitations under the License
 
 from paddle.common_ops_import import check_dtype, check_variable_and_dtype
-from paddle.distributed.auto_parallel.cost.comm_op_cost import (
+from paddle.distributed.auto_parallel.static.cost.comm_op_cost import (
     AllreduceSumOpCost,
     IdentityOpCost,
 )
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fill_constant_batch_size_like.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_fill_constant_batch_size_like.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_fill_constant_batch_size_like.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_flash_attn.py b/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py
similarity index 97%
rename from python/paddle/distributed/auto_parallel/operators/dist_flash_attn.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py
index 331bdfd25ae0aba86e6ffbca7bde4683588ee75c..2812554eb0a4ab9ce0366279d1354f3b3af6b146 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_flash_attn.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_flash_attn.py
@@ -14,10 +14,10 @@
 
 import logging
 
-from ...utils.log_utils import get_logger
+from ....utils.log_utils import get_logger
 
 _logger = get_logger(logging.INFO)
-from ..random import determinate_rng, is_enable_auto_rand_ctrl
+from ...random import determinate_rng, is_enable_auto_rand_ctrl
 from .common import (
     DistributedOperatorImplContainer,
     register_distributed_operator_impl,
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_attention.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_fused_attention.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_fused_attention.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py
similarity index 98%
rename from python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py
index 12612540a9a67124cb5a10f7b439980b3f22d013..a97309a587d050a0390796016e08f6040c800d48 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_fused_dropout_add.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_dropout_add.py
@@ -18,10 +18,10 @@ import paddle
 from paddle.framework import core
 from paddle.utils import unique_name
 
-from ...utils.log_utils import get_logger
+from ....utils.log_utils import get_logger
 
 _logger = get_logger(logging.INFO)
-from ..random import determinate_rng, is_enable_auto_rand_ctrl
+from ...random import determinate_rng, is_enable_auto_rand_ctrl
 from ..utils import (
     naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
     set_var_dist_attr,
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py b/python/paddle/distributed/auto_parallel/static/operators/dist_fused_feedforward.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_fused_feedforward.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_fused_feedforward.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/operators/dist_matmul.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py
index 8825e14d9aba7da62d52e09d4f228be8afc1e056..28eed81c6bcb061da6c7296050a12699a302db62 100644
--- a/python/paddle/distributed/auto_parallel/operators/dist_matmul.py
+++ b/python/paddle/distributed/auto_parallel/static/operators/dist_matmul.py
@@ -15,7 +15,7 @@
 import copy
 
 from paddle.common_ops_import import check_dtype, check_variable_and_dtype
-from paddle.distributed.auto_parallel.cost.comm_op_cost import (
+from paddle.distributed.auto_parallel.static.cost.comm_op_cost import (
     AllreduceSumOpCost,
     IdentityOpCost,
 )
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_pnorm.py b/python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_pnorm.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_pnorm.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_reduce_sum_p.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_reduce_sum_p.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_reshape.py b/python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_reshape.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_reshape.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_scale.py b/python/paddle/distributed/auto_parallel/static/operators/dist_scale.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_scale.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_scale.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_shape.py b/python/paddle/distributed/auto_parallel/static/operators/dist_shape.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_shape.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_shape.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_slice.py b/python/paddle/distributed/auto_parallel/static/operators/dist_slice.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_slice.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_slice.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_softmax.py b/python/paddle/distributed/auto_parallel/static/operators/dist_softmax.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_softmax.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_softmax.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_split.py b/python/paddle/distributed/auto_parallel/static/operators/dist_split.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_split.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_split.py
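A pattern worth noting in the operator hunks above: each module now sits one package level deeper, so every relative import gains exactly one leading dot (for example, ...utils.log_utils becomes ....utils.log_utils, while intra-package imports such as ..utils are untouched). Absolute imports are unaffected by the extra nesting; a small sketch (assuming a Paddle build at this revision):

    import importlib

    # dist_dropout now lives under static/operators; the dotted path below
    # mirrors the rename in the hunks above.
    mod = importlib.import_module(
        "paddle.distributed.auto_parallel.static.operators.dist_dropout"
    )
    print(mod.__name__)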
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_transpose.py b/python/paddle/distributed/auto_parallel/static/operators/dist_transpose.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_transpose.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_transpose.py
diff --git a/python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py b/python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/operators/dist_update_loss_scaling.py
rename to python/paddle/distributed/auto_parallel/static/operators/dist_update_loss_scaling.py
diff --git a/python/paddle/distributed/auto_parallel/parallelizer.py b/python/paddle/distributed/auto_parallel/static/parallelizer.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/parallelizer.py
rename to python/paddle/distributed/auto_parallel/static/parallelizer.py
diff --git a/python/paddle/distributed/auto_parallel/parallelizer_v2.py b/python/paddle/distributed/auto_parallel/static/parallelizer_v2.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/parallelizer_v2.py
rename to python/paddle/distributed/auto_parallel/static/parallelizer_v2.py
index 6807016c34f172485cbf3b1c71d80f48545ac1a3..8a5def0ec9d0e31885bdb0646e4fbb973693d4df 100644
--- a/python/paddle/distributed/auto_parallel/parallelizer_v2.py
+++ b/python/paddle/distributed/auto_parallel/static/parallelizer_v2.py
@@ -20,10 +20,10 @@ from paddle.distributed.passes import PassManager, new_pass
 from paddle.static import append_backward, program_guard
 from paddle.utils import unique_name
 
-from ..utils.log_utils import get_logger
+from ...utils.log_utils import get_logger
+from ..random import init_auto_parallel_rng
 from .partitioner import Partitioner
 from .process_group import get_world_process_group
-from .random import init_auto_parallel_rng
 from .reshard import Resharder
 from .utils import set_grad_var_shape
 
diff --git a/python/paddle/distributed/auto_parallel/partitioner.py b/python/paddle/distributed/auto_parallel/static/partitioner.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/partitioner.py
rename to python/paddle/distributed/auto_parallel/static/partitioner.py
index f542b49fdecbde541aca28ff0c7206950384089f..a0190c3d3c4095be01e17bcb3e64a9bb73822715 100644
--- a/python/paddle/distributed/auto_parallel/partitioner.py
+++ b/python/paddle/distributed/auto_parallel/static/partitioner.py
@@ -15,8 +15,10 @@
 import copy
 
 import paddle
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.operators.common import (
     get_distributed_operator_impl_container,
 )
 from paddle.framework import Program, core
diff --git a/python/paddle/distributed/auto_parallel/planner.py b/python/paddle/distributed/auto_parallel/static/planner.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/planner.py
rename to python/paddle/distributed/auto_parallel/static/planner.py
diff --git a/python/paddle/distributed/auto_parallel/planner_v2.py b/python/paddle/distributed/auto_parallel/static/planner_v2.py
similarity index 96%
rename from python/paddle/distributed/auto_parallel/planner_v2.py
rename to python/paddle/distributed/auto_parallel/static/planner_v2.py
index efe154b19004a38478b68ac7cfae44825782888b..f0ac925371055b1cb5e408321a075725f7d31ee3 100755
--- a/python/paddle/distributed/auto_parallel/planner_v2.py
+++ b/python/paddle/distributed/auto_parallel/static/planner_v2.py
@@ -18,15 +18,17 @@ import pickle
 
 import numpy as np
 
-from paddle.distributed.auto_parallel.dist_attribute import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.dist_attribute import (
     OperatorDistAttr,
     TensorDistAttr,
 )
-from paddle.distributed.auto_parallel.dist_op import DistributedOperator
-from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.dist_op import DistributedOperator
+from paddle.distributed.auto_parallel.static.dist_tensor import (
+    DistributedTensor,
+)
 
-from ..utils.log_utils import get_logger
+from ...utils.log_utils import get_logger
 from .completion import Completer
 from .dist_context import get_default_distributed_context
 from .tuner.parallel_tuner import ParallelTuner
diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/static/process_group.py
similarity index 98%
rename from python/paddle/distributed/auto_parallel/process_group.py
rename to python/paddle/distributed/auto_parallel/static/process_group.py
index e7d8a758161616353a173691aef8df2857307b40..578ec21e8082b8d12a489eb8022b70346098d58d 100644
--- a/python/paddle/distributed/auto_parallel/process_group.py
+++ b/python/paddle/distributed/auto_parallel/static/process_group.py
@@ -17,8 +17,8 @@ from collections import OrderedDict
 import paddle
 from paddle.framework import core
 
-from ..collective import _get_global_env, _new_ring_id
-from ..utils.log_utils import get_logger
+from ...collective import _get_global_env, _new_ring_id
+from ...utils.log_utils import get_logger
 from .utils import dygraph_guard
 
 logger = get_logger("INFO", __name__)
diff --git a/python/paddle/distributed/auto_parallel/process_mesh_v2.py b/python/paddle/distributed/auto_parallel/static/process_mesh_v2.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/process_mesh_v2.py
rename to python/paddle/distributed/auto_parallel/static/process_mesh_v2.py
diff --git a/python/paddle/distributed/auto_parallel/reshard.py b/python/paddle/distributed/auto_parallel/static/reshard.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/reshard.py
rename to python/paddle/distributed/auto_parallel/static/reshard.py
diff --git a/python/paddle/distributed/auto_parallel/topology.py b/python/paddle/distributed/auto_parallel/static/topology.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/topology.py
rename to python/paddle/distributed/auto_parallel/static/topology.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/__init__.py b/python/paddle/distributed/auto_parallel/static/tuner/__init__.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/__init__.py
rename to python/paddle/distributed/auto_parallel/static/tuner/__init__.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/algorithms.py b/python/paddle/distributed/auto_parallel/static/tuner/algorithms.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/algorithms.py
rename to python/paddle/distributed/auto_parallel/static/tuner/algorithms.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/config.py b/python/paddle/distributed/auto_parallel/static/tuner/config.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/tuner/config.py
rename to python/paddle/distributed/auto_parallel/static/tuner/config.py
index 78f94b87b360b32f25d2a3b3c4e5c677586780b5..28ab9536b9bccf8a3df4d61ca2cdcfe77f82dc66 100644
--- a/python/paddle/distributed/auto_parallel/tuner/config.py
+++ b/python/paddle/distributed/auto_parallel/static/tuner/config.py
@@ -15,7 +15,7 @@
 import copy
 import os
 
-from ..strategy import Strategy
+from ...strategy import Strategy
 
 _tuning_supported_passes = ["sharding", "recompute"]
 
diff --git a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py
similarity index 97%
rename from python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py
rename to python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py
index b3a925070b320a785429570743f2e99de51fe3b2..8b3d23c68cb1aea67dc7be03623a9ea501d51413 100644
--- a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py
+++ b/python/paddle/distributed/auto_parallel/static/tuner/optimization_tuner.py
@@ -27,16 +27,18 @@ import sys
 import time
 
 import paddle
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.process_group import (
     clear_all_process_groups,
     get_all_process_groups,
     new_process_group,
 )
-from paddle.distributed.auto_parallel.reshard import Resharder
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.reshard import Resharder
+from paddle.distributed.auto_parallel.static.utils import (
     debug_program,
     set_grad_var_shape,
 )
@@ -465,7 +467,7 @@ class OptimizationTuner:
             ]
         )
         cmd_args = (
-            "-m paddle.distributed.auto_parallel.tuner.profiler"
+            "-m paddle.distributed.auto_parallel.static.tuner.profiler"
             + " "
             + profile_args
         )
diff --git a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/parallel_tuner.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py
rename to python/paddle/distributed/auto_parallel/static/tuner/parallel_tuner.py
index 4a3f85d6b21daf136db0639db43c988e5102da77..c2c1055663ccc253e4b63d611365a464d4f3d8cf 100644
--- a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py
+++ b/python/paddle/distributed/auto_parallel/static/tuner/parallel_tuner.py
@@ -21,13 +21,13 @@ from collections import defaultdict
 
 import numpy as np
 
+from ...process_mesh import ProcessMesh
 from ..completion import Completer
 from ..cost import CostEstimator
 from ..dist_context import _node_id
 from ..dist_op import DistributedOperator
 from ..operators.common import find_compatible_distributed_operator_impls
 from ..parallelizer_v2 import Parallelizer
-from ..process_mesh import ProcessMesh
 from .trial import Trial, TrialStatus
 from .tunable_space import TunableSpace
 from .tunable_variable import Boolean, IntRange
diff --git a/python/paddle/distributed/auto_parallel/tuner/profiler.py b/python/paddle/distributed/auto_parallel/static/tuner/profiler.py
similarity index 98%
rename from python/paddle/distributed/auto_parallel/tuner/profiler.py
rename to python/paddle/distributed/auto_parallel/static/tuner/profiler.py
index 486db968ee3beffc7c77f2d02ea9823e74fb76a3..55f83b48647aa088a19102edd17faac6b0a35368 100644
--- a/python/paddle/distributed/auto_parallel/tuner/profiler.py
+++ b/python/paddle/distributed/auto_parallel/static/tuner/profiler.py
@@ -21,10 +21,10 @@ import time
 import traceback
 
 import paddle
-from paddle.distributed.auto_parallel.dist_loader import (
+from paddle.distributed.auto_parallel.static.dist_loader import (
     DistributedDataLoaderFromGenerator,
 )
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.process_group import (
     get_all_process_groups,
     new_process_group,
 )
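Two things follow from the tuner hunks above: the OptimizationTuner spawns the profiler by dotted module path, which is why the "-m paddle.distributed.auto_parallel.static.tuner.profiler" string literal had to change along with the files, and any code outside this repository that imports the tuner or completion machinery must track the move. A hypothetical compatibility shim for such callers (not part of this change):

    # Prefer the new layout, fall back to the pre-refactor one.
    try:
        from paddle.distributed.auto_parallel.static.completion import Completer
    except ImportError:  # older Paddle without the static subpackage
        from paddle.distributed.auto_parallel.completion import Completer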
diff --git a/python/paddle/distributed/auto_parallel/tuner/recorder.py b/python/paddle/distributed/auto_parallel/static/tuner/recorder.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/recorder.py
rename to python/paddle/distributed/auto_parallel/static/tuner/recorder.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/rule_based_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/tuner/rule_based_tuner.py
rename to python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py
index 5ef0e872933453bbdaf0a2fd1e4fc2df3ddca333..bef30c7ce3aa570c3d8da8fb13c02f7c4bae5bfa 100644
--- a/python/paddle/distributed/auto_parallel/tuner/rule_based_tuner.py
+++ b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py
@@ -26,20 +26,24 @@ from functools import reduce
 import numpy as np
 
 import paddle
-from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.cost import CostEstimator
-from paddle.distributed.auto_parallel.dist_attribute import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.cluster_v2 import DeviceMesh
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.cost import CostEstimator
+from paddle.distributed.auto_parallel.static.dist_attribute import (
     OperatorDistAttr,
     TensorDistAttr,
 )
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
+from paddle.distributed.auto_parallel.static.dist_tensor import (
+    DistributedTensor,
+)
+from paddle.distributed.auto_parallel.static.process_group import (
     get_world_process_group,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     is_gradient_clip_op,
     print_program_with_dist_attr,
 )
@@ -48,7 +52,7 @@ from paddle.fluid import program_guard
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import Parameter, unique_name
 
-from ...utils.log_utils import get_logger
+from ....utils.log_utils import get_logger
 from ..graph import Graph
 
 _PATTERNS = {}
diff --git a/python/paddle/distributed/auto_parallel/tuner/storable.py b/python/paddle/distributed/auto_parallel/static/tuner/storable.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/storable.py
rename to python/paddle/distributed/auto_parallel/static/tuner/storable.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/trial.py b/python/paddle/distributed/auto_parallel/static/tuner/trial.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/trial.py
rename to python/paddle/distributed/auto_parallel/static/tuner/trial.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_space.py b/python/paddle/distributed/auto_parallel/static/tuner/tunable_space.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/tunable_space.py
rename to python/paddle/distributed/auto_parallel/static/tuner/tunable_space.py
diff --git a/python/paddle/distributed/auto_parallel/tuner/tunable_variable.py b/python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py
similarity index 100%
rename from python/paddle/distributed/auto_parallel/tuner/tunable_variable.py
rename to python/paddle/distributed/auto_parallel/static/tuner/tunable_variable.py
diff --git a/python/paddle/distributed/auto_parallel/utils.py b/python/paddle/distributed/auto_parallel/static/utils.py
similarity index 99%
rename from python/paddle/distributed/auto_parallel/utils.py
rename to python/paddle/distributed/auto_parallel/static/utils.py
index d5a196a080d6cceee22d3eb8ce8952748e3f849d..fa9aeacd001b9c6695349209c57785a9bdd6a109 100644
--- a/python/paddle/distributed/auto_parallel/utils.py
+++ b/python/paddle/distributed/auto_parallel/static/utils.py
@@ -27,8 +27,8 @@ from paddle.framework import core
 from paddle.framework.io_utils import is_belong_to_optimizer, is_parameter
 from paddle.static import Variable
 
+from ..process_mesh import ProcessMesh
 from .dist_attribute import OperatorDistAttr, TensorDistAttr
-from .process_mesh import ProcessMesh
 
 OpRole = core.op_proto_and_checker_maker.OpRole
 OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
@@ -1868,7 +1868,7 @@ def get_lr(optimizer):
 def initialize_pg_in_full_mode(all_process_groups, cur_rank):
     import socket
 
-    from ..collective import _get_global_env
+    from ...collective import _get_global_env
 
     has_recv_by_socket = []
     # This is a magic number
@@ -1946,7 +1946,7 @@ def is_recompute_op(op):
 
 
 def set_recompute_segments(model, losses, strategy, program):
-    from ..passes.auto_parallel_recompute import RecomputeState
+    from ...passes.auto_parallel_recompute import RecomputeState
 
     if not losses:
         return
@@ -2054,7 +2054,7 @@ def validate_opt(optimizer):
 
 
 def set_data_parallel(x):
-    from .interface import ProcessMesh, shard_tensor
+    from ..interface import ProcessMesh, shard_tensor
     from .process_group import get_world_process_group
 
     world_ranks = get_world_process_group().ranks
@@ -2095,7 +2095,7 @@ def _copy_tensor_dist_attr_to_cpp(cpp_dist_attr, py_dist_attr):
 
 
 def _copy_tensor_dist_attr_from_cpp(cpp_dist_attr, py_dist_attr):
-    from .process_mesh import ProcessMesh
+    from ..process_mesh import ProcessMesh
 
     cpp_process_mesh = cpp_dist_attr.process_mesh
     if cpp_process_mesh is not None:
@@ -2128,7 +2128,7 @@ def _copy_op_dist_attr_to_cpp(cpp_dist_attr, py_dist_attr):
 
 
 def _copy_op_dist_attr_from_cpp(cpp_dist_attr, py_dist_attr):
-    from .process_mesh import ProcessMesh
+    from ..process_mesh import ProcessMesh
 
     cpp_process_mesh = cpp_dist_attr.process_mesh
     if cpp_process_mesh is not None:
diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py
index 39948ab28e6ef1599466d2adb96ce4f58ca83d78..de003916b7d2558c0b275a990a85f81d7059743b 100755
--- a/python/paddle/distributed/fleet/fleet.py
+++ b/python/paddle/distributed/fleet/fleet.py
@@ -1335,7 +1335,7 @@ class Fleet:
                 self._user_defined_strategy.semi_auto
                 or self._user_defined_strategy.auto_search
             ):
-                from ..auto_parallel.parallelizer import AutoParallelizer
+                from ..auto_parallel.static.parallelizer import AutoParallelizer
 
                 auto_parallelizer = AutoParallelizer(self)
                 (
diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py
index def5156f811aa2f53c4495c0caa908542e1b281d..a6f12af17fa5b1546bc079748f82314c12705b58 100644
--- a/python/paddle/distributed/passes/auto_parallel_amp.py
+++ b/python/paddle/distributed/passes/auto_parallel_amp.py
@@ -13,11 +13,13 @@
 # limitations under the License.
 
 import paddle
-from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.dist_attribute import (
+    OperatorDistAttr,
+)
+from paddle.distributed.auto_parallel.static.process_group import (
     get_world_process_group,
 )
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
     set_var_dist_attr,
 )
@@ -42,7 +44,7 @@ from paddle.static.amp.fp16_utils import (
 from paddle.utils import unique_name
 
 from ..auto_parallel.process_mesh import ProcessMesh
-from ..auto_parallel.utils import (
+from ..auto_parallel.static.utils import (
     is_backward_op,
     is_forward_op,
     is_loss_grad_op,
diff --git a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
index 5d519bcc94e06b544284d899c9253063018215e0..a371792c5198d8208927e5850e592332cb60a4f4 100644
--- a/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
+++ b/python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py
@@ -15,16 +15,16 @@
 from collections import OrderedDict
 
 import paddle
-from paddle.distributed.auto_parallel.dist_attribute import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.dist_attribute import (
     OperatorDistAttr,
     TensorDistAttr,
 )
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.operators.common import (
     is_data_parallel_reduce_op,
     is_data_parallel_scale_op,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     find_higher_order_backward_op,
     get_var_numel,
     insert_dependencies_for_vars,
diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py
index 6a763ce15030f709c721d306977436e9d2055313..8da9edb34258dcf3d8fb29611f56d9d2533453fc 100644
--- a/python/paddle/distributed/passes/auto_parallel_fp16.py
+++ b/python/paddle/distributed/passes/auto_parallel_fp16.py
@@ -16,11 +16,13 @@ from collections import defaultdict
 
 import paddle
 from paddle.common_ops_import import check_type, check_variable_and_dtype
-from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.static.dist_attribute import (
+    OperatorDistAttr,
+)
+from paddle.distributed.auto_parallel.static.process_group import (
     get_world_process_group,
 )
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     is_backward_op,
     is_forward_op,
     naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py
index 481ba3b6c3113142d437fbea1ef6f27d5befca98..bda2b557fc54c1b2f61ac1d8f55d145ab46e6304 100644
--- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py
+++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py
@@ -19,18 +19,21 @@ import numpy as np
 import paddle
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
 
-from ..auto_parallel.dist_attribute import OperatorDistAttr, TensorDistAttr
-from ..auto_parallel.operators.common import (
+from ..auto_parallel.process_mesh import ProcessMesh
+from ..auto_parallel.static.dist_attribute import (
+    OperatorDistAttr,
+    TensorDistAttr,
+)
+from ..auto_parallel.static.operators.common import (
     SyncMode,
     is_data_parallel_reduce_op,
 )
-from ..auto_parallel.process_group import (
+from ..auto_parallel.static.process_group import (
     get_all_process_groups,
     get_world_process_group,
 )
-from ..auto_parallel.process_mesh import ProcessMesh
-from ..auto_parallel.reshard import Resharder
-from ..auto_parallel.utils import (
+from ..auto_parallel.static.reshard import Resharder
+from ..auto_parallel.static.utils import (
     _get_comm_group,
     insert_dependencies_for_vars,
     is_gradient_clip_op,
diff --git a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py
index 4bf460d1b42889c6c5c9663e0eb7a551ae463ff9..8a87ac7f599d2f20c8c127723cf31c1e9c056948 100644
--- a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py
+++ b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py
@@ -15,11 +15,11 @@
 from typing import Any, Dict, List, Tuple
 
 import paddle
-from paddle.distributed.auto_parallel.process_group import (
+from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
+from paddle.distributed.auto_parallel.static.process_group import (
     get_world_process_group,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.utils import (
     is_optimize_op,
     naive_set_dist_op_attr_for_program_by_mesh_and_mapping,
     set_var_dist_attr,
diff --git a/python/paddle/distributed/passes/auto_parallel_quantization.py b/python/paddle/distributed/passes/auto_parallel_quantization.py
index f2f35b33728bb7732932b01d8dca57bc1f9298e8..759e79680fc5149348a65ed5f384168acf03c903 100644
--- a/python/paddle/distributed/passes/auto_parallel_quantization.py
+++ b/python/paddle/distributed/passes/auto_parallel_quantization.py
@@ -26,8 +26,11 @@ from paddle.static.quantization import (
     quant_config,
 )
 
-from ..auto_parallel.converter import Converter
-from ..auto_parallel.dist_attribute import OperatorDistAttr, TensorDistAttr
+from ..auto_parallel.static.converter import Converter
+from ..auto_parallel.static.dist_attribute import (
+    OperatorDistAttr,
+    TensorDistAttr,
+)
 from .pass_base import PassBase, register_pass
 
 TRANSFORM_PASS_OP_TYPES = list(
diff --git a/python/paddle/distributed/passes/auto_parallel_recompute.py b/python/paddle/distributed/passes/auto_parallel_recompute.py
index 5de90af8e2e9ad1c197333a4d8995bd25f94e736..d64e8df305f75a61fc201acdadfe7e836418ed23 100644
--- a/python/paddle/distributed/passes/auto_parallel_recompute.py
+++ b/python/paddle/distributed/passes/auto_parallel_recompute.py
@@ -26,8 +26,8 @@ from paddle.fluid.backward import (
 from paddle.framework import core
 from paddle.utils import unique_name
 
-from ..auto_parallel.dist_attribute import OperatorDistAttr
-from ..auto_parallel.utils import (
+from ..auto_parallel.static.dist_attribute import OperatorDistAttr
+from ..auto_parallel.static.utils import (
     get_loss_op,
     insert_dependencies_for_two_ops,
     is_backward_op,
diff --git a/python/paddle/distributed/passes/auto_parallel_sharding.py b/python/paddle/distributed/passes/auto_parallel_sharding.py
index 44045155cb7737f2413c11b7d57d09e69489f9a2..ac1d7fd8f071f6147941d718b7a4a0113e2b3ef0 100644
--- a/python/paddle/distributed/passes/auto_parallel_sharding.py
+++ b/python/paddle/distributed/passes/auto_parallel_sharding.py
@@ -16,13 +16,15 @@ import logging
 from functools import reduce
 
 import paddle
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.operators.common import (
     ParallelMode,
     is_data_parallel_reduce_op,
     is_parameter_related,
 )
-from paddle.distributed.auto_parallel.process_group import new_process_group
-from paddle.distributed.auto_parallel.utils import (
+from paddle.distributed.auto_parallel.static.process_group import (
+    new_process_group,
+)
+from paddle.distributed.auto_parallel.static.utils import (
     _get_comm_group,
     get_logger,
     get_var_numel,
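The passes above all follow the same split: concepts shared between dynamic and static graph (ProcessMesh, interface, strategy, random) stay at the top of auto_parallel, while the static-graph machinery (dist_attribute, process_group, operators, utils) moves under static. A short sketch of the resulting layout (assuming this revision; the mesh values are illustrative):

    from paddle.distributed.auto_parallel.process_mesh import ProcessMesh  # shared
    from paddle.distributed.auto_parallel.static.process_group import (  # moved
        get_world_process_group,
    )

    mesh = ProcessMesh([0, 1], dim_names=["x"])  # registers ranks 0 and 1
    print(mesh.process_ids)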
-from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.operators.common import ( is_amp_flag_sync_op, is_data_parallel_reduce_op, is_global_norm_sync_op, ) -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( OpRole, insert_dependencies_for_vars, ) diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index 1635c7d5d211bb292df06ce20aa331ece1efd1b6..a0864992c4e1c83b85aaa4e687f3fadc346dbae1 100755 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -1439,7 +1439,7 @@ def _append_backward_ops_( ) else: default_ctx = getattr( - paddle.distributed.auto_parallel.dist_context, + paddle.distributed.auto_parallel.static.dist_context, '_g_default_distributed_context', None, ) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 904a30f64fa98c4e3d5ab01e19ade273f4102016..38b62736e58bbaa8e66ea64005bdb8a427b50287 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1681,7 +1681,7 @@ class Variable(metaclass=VariableMetaClass): if self.persistable: var_str = "persist " + var_str - from paddle.distributed.auto_parallel.dist_context import ( + from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) @@ -3137,7 +3137,7 @@ class Operator: if i != len(attr_names) - 1: attrs_str += ", " - from paddle.distributed.auto_parallel.dist_context import ( + from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py b/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py index 554c578f8508cd93b2c495434695c7727240fcd0..2a947adc030200fa9e18a7151793e7d652e47a3b 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_autoconvert.py @@ -22,10 +22,10 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.static.dist_context import ( set_default_distributed_context, ) -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( get_dist_attr, load_checkpoint_into_program, load_distributed_checkpoint, diff --git a/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py b/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py index 1ef9634f8db2a3e8af0740e57645e31f92d7f349..3f862705fedde54afac04471f5b7a0dd9aca01fe 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel_save_load.py @@ -23,7 +23,7 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( load_checkpoint_into_program, save_distributed_checkpoint, ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py index a2a9c9113271be5faedc09d8b42bfa87459396ca..16ede226d200750874611200a8f9a07b05e9a7f0 100644 --- 
a/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/dygraph_save_for_auto_infer.py @@ -25,7 +25,7 @@ import numpy as np import paddle from paddle import distributed as dist from paddle.distributed import fleet -from paddle.distributed.auto_parallel import engine +from paddle.distributed.auto_parallel.static import engine from paddle.distributed.fleet.layers.mpu.mp_layers import ( ColumnParallelLinear, RowParallelLinear, diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py index d1104c2ce5931d2da5eb52e0d54ad469142d4159..84606eb1216e2e7e6fa9b4b66bdae82ec0c5275a 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_cluster.py @@ -17,7 +17,7 @@ import os import tempfile import unittest -from paddle.distributed.auto_parallel.cluster import ( +from paddle.distributed.auto_parallel.static.cluster import ( Cluster, DeviceType, LinkType, diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py index 810f99e0dd7cf78e6dc128fea1509f5f3618a362..103651728f8b07c38413782a5cbf2badb847e66e 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion.py @@ -18,8 +18,10 @@ import unittest.mock import paddle import paddle.nn.functional as F from paddle import nn, static, tensor, utils -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) from paddle.distributed.fleet import auto paddle.enable_static() @@ -188,7 +190,7 @@ class TestMLPAutoCompletion(unittest.TestCase): # # dist_context) # dist_context.finalize_distributed_attr_for_program( # complete_train_program) - # from paddle.distributed.auto_parallel.interface import _g_process_mesh_map + # from paddle.distributed.auto_parallel.static.interface import _g_process_mesh_map # for block in complete_train_program.blocks: # for tensor in block.vars.values(): # desc = tensor.desc diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py index d136aa6adb54da133c8eaca18052981395545bcf..cc09ac989e1da1edc8ae7981ae0770feb98b42b5 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_completion_gpt.py @@ -18,8 +18,10 @@ import unittest import paddle import paddle.nn.functional as F from paddle import nn, static, tensor, utils -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) from paddle.distributed.fleet import auto from paddle.fluid import layers from paddle.nn.layer.transformer import _convert_param_attr_to_list diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py 
b/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py index 5746df433fe4651914980c8d09365a6a057296c0..7cf8b2d399f1288a56b72982c200fa9e65332a34 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_cost_model.py @@ -18,12 +18,16 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.cost_model import estimate_cost -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.reshard import Resharder +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.cost_model import estimate_cost +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto from paddle.fluid import core diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py index 95b7f95c98ce6e62ccb66947237745631f4cb176..420e8b7f526e8223f15d6445b76bf854d599bb77 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_dist_tensor.py @@ -20,12 +20,20 @@ from test_auto_parallel_reshard import mlp_forward import paddle from paddle.distributed import fleet -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_attribute import TensorDistAttr -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_attribute import ( + TensorDistAttr, +) +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.dist_tensor import ( + DistributedTensor, +) +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.fleet import auto diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py index b8628f671c022af3516006725d813eabee5d2af2..a9b1fa973f7754c624a95d9856cef6a776d34e2c 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_graph.py @@ -14,7 +14,7 @@ import unittest -from paddle.distributed.auto_parallel.graph import Graph +from paddle.distributed.auto_parallel.static.graph import Graph class TestAutoParallelGraph(unittest.TestCase): 
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py index 11f20b68939ec833eac92b1bc1a60e4996e91fe9..cae7c24a1614b15f868768bb2bf48ca8a8636936 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py @@ -23,17 +23,21 @@ import paddle import paddle.nn.functional as F from paddle import fluid, nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.mapper import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.mapper import ( get_comm_volume, get_dtype_bytes, mapping, ) -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.reshard import Resharder +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto from paddle.fluid import core diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py index 33db190dfc6b3240073eec6ce778d9547135e36b..71b6a7b7a2db79d0c9a00449f159099dd3c8d967 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner.py @@ -19,11 +19,15 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, tensor, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.process_group import new_process_group -from paddle.distributed.auto_parallel.utils import _get_comm_group +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.process_group import ( + new_process_group, +) +from paddle.distributed.auto_parallel.static.utils import _get_comm_group from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py index 3e058bfb18e5456190cf5ff0a115b83b3e954cc0..038f1b4854b3580307889b98e6ac5e2fd1a731ce 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py @@ -18,11 +18,15 @@ import unittest import paddle import paddle.nn.functional as F from 
paddle import nn, static, tensor, utils -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.process_group import new_process_group -from paddle.distributed.auto_parallel.utils import _get_comm_group +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.process_group import ( + new_process_group, +) +from paddle.distributed.auto_parallel.static.utils import _get_comm_group from paddle.distributed.fleet import auto from paddle.fluid import layers from paddle.nn.layer.transformer import _convert_param_attr_to_list diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py index 4698667b985fd660c14ba9d1a55d27fe0233feac..4af3fc831abe451d778bd0f573192ab8127f81f9 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard.py @@ -18,15 +18,19 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.process_group import ( +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.process_group import ( ProcessGroup, _g_process_group_map, ) -from paddle.distributed.auto_parallel.reshard import Resharder +from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py index e59cfa1a1f12ca6c2bd7f5923e9717b6a0f778de..b8afece8001cb52936e4d920fc5f2247962439cf 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_dpmppp.py @@ -18,11 +18,15 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.reshard import Resharder +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) 
+from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py index 33acd0172920097a726a5fea46d56cf680f6ae37..ebc7b95290e691d55f2d98d618dc0ceab7e66e6e 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_mppp.py @@ -18,13 +18,17 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.cost import CostEstimator -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer -from paddle.distributed.auto_parallel.partitioner import Partitioner -from paddle.distributed.auto_parallel.reshard import Resharder +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.cost import CostEstimator +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) +from paddle.distributed.auto_parallel.static.partitioner import Partitioner +from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py index 11c817b9baeea85cde0a9ede689851fe585ae382..2ff75315725793a2be9190d5df1f4a7d85e074d9 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_reshard_serial.py @@ -22,7 +22,7 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) from paddle.distributed.fleet import auto
diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py index 277072a24e2fb4a03a396cdd24b1f4b1d3483216..d5bfd5889428d6c7b9061f2a7bb9ff7fc3e7e962 100755 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_searcher.py @@ -17,13 +17,15 @@ import unittest import paddle import paddle.nn.functional as F from paddle import nn, static, utils -from paddle.distributed.auto_parallel.dist_attribute import ( +from paddle.distributed.auto_parallel.static.dist_attribute import ( OperatorDistAttr, TensorDistAttr, ) -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.planner import PlanSpace -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.planner import PlanSpace +from paddle.distributed.auto_parallel.static.utils import ( update_op_dims_mapping_by_default_dist_impl, update_op_dims_mapping_by_elementwise_like_dist_impl, ) @@ -177,8 +179,10 @@ class TestMLPSearcher(unittest.TestCase): set_default_dist_attr(train_program, dist_context, global_process_mesh) ops = train_program.global_block().ops vars = train_program.global_block().vars - from paddle.distributed.auto_parallel.dist_op import DistributedOperator - from paddle.distributed.auto_parallel.operators.common import ( + from paddle.distributed.auto_parallel.static.dist_op import ( + DistributedOperator, + ) + from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, is_elementwise_op, ) diff --git a/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py b/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py index c9d7d6346ca8f17adf70a3046c391b05cecae3d7..a1c1f86bb1f3e525a9b749628426548189c5d0ab 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_auto_search_dist_matmul_op.py @@ -16,9 +16,11 @@ import unittest import paddle import paddle.nn.functional as F from paddle import nn, static, utils -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr -from paddle.distributed.auto_parallel.dist_op import DistributedOperator -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.dist_attribute import ( + OperatorDistAttr, +) +from paddle.distributed.auto_parallel.static.dist_op import DistributedOperator +from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, ) from paddle.framework import core diff --git a/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py b/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py index 19da767fcf970067400ebbdd32716ba142622d1f..369fdec36e55a6e966b10e4dc8716fc276a73e4f 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py +++
b/python/paddle/fluid/tests/unittests/test_auto_search_dist_op.py @@ -16,9 +16,11 @@ import unittest import paddle import paddle.nn.functional as F from paddle import nn, static, utils -from paddle.distributed.auto_parallel.dist_attribute import OperatorDistAttr -from paddle.distributed.auto_parallel.dist_op import DistributedOperator -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.dist_attribute import ( + OperatorDistAttr, +) +from paddle.distributed.auto_parallel.static.dist_op import DistributedOperator +from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, ) from paddle.fluid import core diff --git a/python/setup.py.in b/python/setup.py.in index 9a6517a7d55f2caac4735f024bd7aa320e04712f..3e6fdb00679204ddb75fde3ad90e60608fac7eda 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -426,9 +426,11 @@ packages=['paddle', 'paddle.distributed.fleet.meta_parallel.sharding', 'paddle.distributed.fleet.meta_parallel.parallel_layers', 'paddle.distributed.auto_parallel', - 'paddle.distributed.auto_parallel.operators', - 'paddle.distributed.auto_parallel.tuner', - 'paddle.distributed.auto_parallel.cost', + 'paddle.distributed.auto_parallel.dygraph', + 'paddle.distributed.auto_parallel.static', + 'paddle.distributed.auto_parallel.static.operators', + 'paddle.distributed.auto_parallel.static.tuner', + 'paddle.distributed.auto_parallel.static.cost', 'paddle.distributed.passes', 'paddle.distributed.models', 'paddle.distributed.models.moe', diff --git a/setup.py b/setup.py index f8858321ae6e983d4dd449cb6ce3e26c6ea1a4d4..ae8cf524bafba3e5733a67e42ec317beda0e083b 100644 --- a/setup.py +++ b/setup.py @@ -1430,9 +1430,11 @@ def get_setup_parameters(): 'paddle.distributed.fleet.meta_parallel.sharding', 'paddle.distributed.fleet.meta_parallel.parallel_layers', 'paddle.distributed.auto_parallel', - 'paddle.distributed.auto_parallel.operators', - 'paddle.distributed.auto_parallel.tuner', - 'paddle.distributed.auto_parallel.cost', + 'paddle.distributed.auto_parallel.dygraph', + 'paddle.distributed.auto_parallel.static', + 'paddle.distributed.auto_parallel.static.operators', + 'paddle.distributed.auto_parallel.static.tuner', + 'paddle.distributed.auto_parallel.static.cost', 'paddle.distributed.passes', 'paddle.distributed.models', 'paddle.distributed.models.moe', diff --git a/test/auto_parallel/amp_o2_pass.py b/test/auto_parallel/amp_o2_pass.py index 767b95c808330625148433983d3d6c893f12edda..04af0112e31cc9a08fe58af75a3981efa614c0be 100644 --- a/test/auto_parallel/amp_o2_pass.py +++ b/test/auto_parallel/amp_o2_pass.py @@ -120,7 +120,10 @@ class TestShardingStage2WithNewEXE(unittest.TestCase): # bf16 mp_bf16_engine = self.get_engine(use_amp=True) - if not paddle.is_compiled_with_cuda() or get_cuda_version() < 11000: + if not ( + paddle.amp.is_bfloat16_supported() + and paddle.device.cuda.get_device_capability()[0] >= 8 + ): return mp_bf16_history = mp_bf16_engine.fit( diff --git a/test/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py b/test/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py index cd11f2fabf77078257caccc70649ccd1909ac824..6f61cafbcd883041533daf916a15c65d1c5cdf7f 100644 --- a/test/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py +++ b/test/auto_parallel/auto_parallel_relaunch_with_gpt_planner.py @@ -20,7 +20,7 @@ import paddle from paddle import static from paddle.distributed import fleet -sys.path.append("..") +sys.path.append("../legacy_test") import 
auto_parallel_gpt_model as modeling from auto_parallel_gpt_model import ( GPTForPretraining, @@ -151,7 +151,7 @@ def train(): }, fetch_list=[loss], ) - print(f"step: {step}, loss: {loss_print[0]:f}") + print(f"step: {step}, loss: {loss_print:f}") else: exe.run( distributed_main_program, diff --git a/test/auto_parallel/auto_parallel_relaunch_with_planner.py b/test/auto_parallel/auto_parallel_relaunch_with_planner.py index 00b769d8c7d08e0d88e5d3b8e8ff9ce237c4214d..4ad1dfb196581d29778b0e1259c8f65b661cbf9b 100644 --- a/test/auto_parallel/auto_parallel_relaunch_with_planner.py +++ b/test/auto_parallel/auto_parallel_relaunch_with_planner.py @@ -15,9 +15,9 @@ import paddle from paddle import static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.cost import CostEstimator -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.cost import CostEstimator +from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) diff --git a/test/auto_parallel/converter.py b/test/auto_parallel/converter.py index 5e0506c3785db53e869cb0dda622c97997c84b0e..411900eaa42cc7719e96f9d961ccaea94b0479da 100644 --- a/test/auto_parallel/converter.py +++ b/test/auto_parallel/converter.py @@ -15,7 +15,7 @@ import numpy as np import paddle -from paddle.distributed.auto_parallel.converter import Converter +from paddle.distributed.auto_parallel.static.converter import Converter def test_convert(): diff --git a/test/auto_parallel/test_align_tool.py b/test/auto_parallel/test_align_tool.py index c0c331b0d7fc4830af1f122599d26f18542ecedd..500b11c78916ce608913f7ce20811eabaa1ac314 100644 --- a/test/auto_parallel/test_align_tool.py +++ b/test/auto_parallel/test_align_tool.py @@ -20,7 +20,9 @@ import numpy as np import paddle from paddle import fluid, nn, optimizer, static -from paddle.distributed.auto_parallel.auto_align_tool import AutoAlignTool +from paddle.distributed.auto_parallel.static.auto_align_tool import ( + AutoAlignTool, +) from paddle.vision.datasets import MNIST warnings.filterwarnings("ignore") diff --git a/test/auto_parallel/test_base_cost.py b/test/auto_parallel/test_base_cost.py index 01a488e2db34095b008f44a5d1da9b86bf9e305c..c9e3e64c6a8dfe9c75cb993060b5baee9c72526b 100644 --- a/test/auto_parallel/test_base_cost.py +++ b/test/auto_parallel/test_base_cost.py @@ -23,21 +23,25 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.completion import Completer -from paddle.distributed.auto_parallel.cost import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.completion import Completer +from paddle.distributed.auto_parallel.static.cost import ( AllreduceSumOpCost, _g_op_cost_factory, ) -from paddle.distributed.auto_parallel.cost.base_cost import ( +from paddle.distributed.auto_parallel.static.cost.base_cost import ( build_comm_costs_from_descs, build_comm_desc_from_dist_op, build_comp_costs_from_descs, build_comp_desc_from_dist_op, build_dp_costs, ) -from paddle.distributed.auto_parallel.dist_context import DistributedContext -from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer +from 
paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) +from paddle.distributed.auto_parallel.static.parallelizer import ( + AutoParallelizer, +) from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/test/auto_parallel/test_cluster.py b/test/auto_parallel/test_cluster.py index c25b6013fa16240dbd5646d97501ee35a6ccb984..679b3f8a3cd0d3a54dbeb6a2b298c89ffba4952d 100644 --- a/test/auto_parallel/test_cluster.py +++ b/test/auto_parallel/test_cluster.py @@ -17,7 +17,7 @@ import os import tempfile import unittest -from paddle.distributed.auto_parallel.cluster import ( +from paddle.distributed.auto_parallel.static.cluster import ( Cluster, get_default_cluster, ) diff --git a/test/auto_parallel/test_cluster_partition.py b/test/auto_parallel/test_cluster_partition.py index 9071b481eb5c473b5ffd8ff4d11c7d2e5ba96930..25087ff16271a1b098287a03b1b61d90370c3129 100644 --- a/test/auto_parallel/test_cluster_partition.py +++ b/test/auto_parallel/test_cluster_partition.py @@ -18,7 +18,7 @@ import unittest class TestClusterPartition(unittest.TestCase): def test_cluster_partition(self): clusters = [(5, 8), (1, 8), (4, 8), (16, 8), (2, 8), (3, 8)] - from paddle.distributed.auto_parallel.tuner.rule_based_tuner import ( + from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import ( ClusterPartitionUtil, ) diff --git a/test/auto_parallel/test_cluster_v2.py b/test/auto_parallel/test_cluster_v2.py index 3f10fb95b846a488c978d314a45ceaa8791a997e..671db9708e654d11a2484ac474b027cefc3097da 100644 --- a/test/auto_parallel/test_cluster_v2.py +++ b/test/auto_parallel/test_cluster_v2.py @@ -14,7 +14,7 @@ import unittest -from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh +from paddle.distributed.auto_parallel.static.cluster_v2 import DeviceMesh from paddle.framework import core diff --git a/test/auto_parallel/test_comm_cost.py b/test/auto_parallel/test_comm_cost.py index 0f664947f2760c032ae976ff8f64c0aa52882235..734cbf8ff6a115715c7366ceceb33a5fb6872b2e 100644 --- a/test/auto_parallel/test_comm_cost.py +++ b/test/auto_parallel/test_comm_cost.py @@ -20,8 +20,8 @@ import unittest from test_cluster import cluster_json, multi_cluster_json import paddle -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.cost import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.cost import ( AllgatherOpCost, AllreduceSumOpCost, BroadcastOpCost, diff --git a/test/auto_parallel/test_comp_cost.py b/test/auto_parallel/test_comp_cost.py index c4e4502e5026375465ef59664dfb4c0af1d89d43..7afb077b7e186c5dcddf067c4f60b38dc0ac14d7 100644 --- a/test/auto_parallel/test_comp_cost.py +++ b/test/auto_parallel/test_comp_cost.py @@ -18,8 +18,8 @@ import unittest from test_cluster import cluster_json -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.cost.comp_op_cost import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.cost.comp_op_cost import ( AssignOpCost, AssignValueOpCost, BeamSearchDecodeOpCost, diff --git a/test/auto_parallel/test_convert_to_process_meshes.py b/test/auto_parallel/test_convert_to_process_meshes.py index 120a7ba438a4066aa992fb777136dc22fbfd4566..472719aef56a692bc9188d9d735dee1056034525 100644 --- a/test/auto_parallel/test_convert_to_process_meshes.py +++ b/test/auto_parallel/test_convert_to_process_meshes.py @@ -18,7 +18,7 @@ 
import unittest class TestConvertToProcessMeshes(unittest.TestCase): def test_convert_to_process_meshes(self): device_meshes = [[1, 8], [4, 8], [15, 8]] - from paddle.distributed.auto_parallel.tuner.rule_based_tuner import ( + from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import ( convert_to_process_meshes, ) diff --git a/test/auto_parallel/test_converter.py b/test/auto_parallel/test_converter.py index edd888acf6984740beba207721d39dc28022d065..f6b95011fc9f34d502f86393837f4ca4f0332163 100644 --- a/test/auto_parallel/test_converter.py +++ b/test/auto_parallel/test_converter.py @@ -18,7 +18,7 @@ import sys import tempfile import unittest -from paddle.distributed.auto_parallel.converter import Converter +from paddle.distributed.auto_parallel.static.converter import Converter class TestConverter(unittest.TestCase): diff --git a/test/auto_parallel/test_dist_assign.py b/test/auto_parallel/test_dist_assign.py index 87064a45a49ffb678a8e4974b6f3a07700e9e031..b7cdb0d6b7f0f81f5db8db2e641cf3f218791b94 100644 --- a/test/auto_parallel/test_dist_assign.py +++ b/test/auto_parallel/test_dist_assign.py @@ -38,9 +38,11 @@ def make_program(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program = program_func() diff --git a/test/auto_parallel/test_dist_attr_v2.py b/test/auto_parallel/test_dist_attr_v2.py index 1d15c34221f9088d621e7b94c6ab7faed397e3dd..37f13f5af9d422b4c12d15771f3127253138fb71 100644 --- a/test/auto_parallel/test_dist_attr_v2.py +++ b/test/auto_parallel/test_dist_attr_v2.py @@ -21,12 +21,12 @@ import paddle import paddle.nn.functional as F from paddle import nn, static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.dist_context import ( DistributedContext, set_default_distributed_context, ) -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh -from paddle.distributed.auto_parallel.utils import ( +from paddle.distributed.auto_parallel.static.utils import ( _copy_dist_attr_from_cpp, _copy_dist_attr_from_cpp_for_graph, _copy_dist_attr_to_cpp, diff --git a/test/auto_parallel/test_dist_context.py b/test/auto_parallel/test_dist_context.py index 2944b2db2a3fb23e7687f3a0163890508782a7a7..695949fd698c0fa86dd7e2126a9aad23c97e3598 100644 --- a/test/auto_parallel/test_dist_context.py +++ b/test/auto_parallel/test_dist_context.py @@ -21,7 +21,9 @@ import paddle import paddle.nn.functional as F from paddle import nn, static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.dist_context import DistributedContext +from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, +) from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/test/auto_parallel/test_dist_matmul.py b/test/auto_parallel/test_dist_matmul.py index 0a07b98de705d82f333c9b41ddeee19eaf5f9439..77c15942709c25bb9ca9ad63a62f4ff16451d9ec 100644 --- a/test/auto_parallel/test_dist_matmul.py +++ 
b/test/auto_parallel/test_dist_matmul.py @@ -103,9 +103,11 @@ def matmulv2_dp2mp2(init_x, init_y, trans_x, trans_y): def parallelizer(program_func, *args, **kwargs): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program, loss = program_func(*args, **kwargs) diff --git a/test/auto_parallel/test_dist_op_cost.py b/test/auto_parallel/test_dist_op_cost.py index ecff2bbf8935b569ce3a14618346713c4b792d2a..4d7cca7e5b3329b6b6800d60bf7b59bb90de2008 100644 --- a/test/auto_parallel/test_dist_op_cost.py +++ b/test/auto_parallel/test_dist_op_cost.py @@ -16,8 +16,8 @@ import copy import unittest import paddle -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.operators.common import ( +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, is_elementwise_op, ) @@ -29,8 +29,10 @@ paddle.enable_static() def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) main_program, startup_program, loss = program_func() diff --git a/test/auto_parallel/test_dist_pnorm.py b/test/auto_parallel/test_dist_pnorm.py index 5ff30d27b6d6e5ff746ce70fc28c3378aeec1d32..623114208150c51bc2f5cd1f1055135fef04aa87 100644 --- a/test/auto_parallel/test_dist_pnorm.py +++ b/test/auto_parallel/test_dist_pnorm.py @@ -75,9 +75,11 @@ def make_program_serial(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program, loss = program_func() diff --git a/test/auto_parallel/test_dist_reshape.py b/test/auto_parallel/test_dist_reshape.py index 8dd84da9175c41142ea536b91af4500c417a9b0a..743cda599e40a313e774bed0837e3f163eddd943 100644 --- a/test/auto_parallel/test_dist_reshape.py +++ b/test/auto_parallel/test_dist_reshape.py @@ -37,9 +37,11 @@ def make_program_dp2(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program 
= program_func() diff --git a/test/auto_parallel/test_dist_scale.py b/test/auto_parallel/test_dist_scale.py index b68131e361ec04469bae594309b35b23bfde841a..270f6951ece2ef21ecca6846d3c57d63499d6453 100644 --- a/test/auto_parallel/test_dist_scale.py +++ b/test/auto_parallel/test_dist_scale.py @@ -34,9 +34,11 @@ def make_program(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program = program_func() diff --git a/test/auto_parallel/test_dist_shape.py b/test/auto_parallel/test_dist_shape.py index 0322a817934fdfd1132eab3489c996438b3fa270..6bc33e82dac8df352ab6f03fa8bcfe0459d3c9f1 100644 --- a/test/auto_parallel/test_dist_shape.py +++ b/test/auto_parallel/test_dist_shape.py @@ -34,9 +34,11 @@ def make_program(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program = program_func() diff --git a/test/auto_parallel/test_dist_slice.py b/test/auto_parallel/test_dist_slice.py index cdca9904d622b7b7951da87fb6e59c0b4de74bbb..e94dcf32f7bf9ccb49789b519977b8efed9e059f 100644 --- a/test/auto_parallel/test_dist_slice.py +++ b/test/auto_parallel/test_dist_slice.py @@ -56,9 +56,11 @@ def make_program_serial(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program = program_func() diff --git a/test/auto_parallel/test_dist_split.py b/test/auto_parallel/test_dist_split.py index edc711ea4c8410e7376667c40db55a6c6fc38b6c..b44d180685edd83ab083c397831352b0ae6a5d8f 100644 --- a/test/auto_parallel/test_dist_split.py +++ b/test/auto_parallel/test_dist_split.py @@ -34,9 +34,11 @@ def make_program_dp2(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program = program_func() diff --git a/test/auto_parallel/test_engine_callbacks.py 
b/test/auto_parallel/test_engine_callbacks.py index d62cff86245816f4a012df3e85ab31da672a6236..f00d62cc035bf17f2452675633f83b619f1f6d51 100644 --- a/test/auto_parallel/test_engine_callbacks.py +++ b/test/auto_parallel/test_engine_callbacks.py @@ -20,7 +20,7 @@ import unittest import paddle import paddle.vision.transforms as T -from paddle.distributed.auto_parallel.callbacks import config_callbacks +from paddle.distributed.auto_parallel.static.callbacks import config_callbacks from paddle.distributed.fleet import auto from paddle.static import InputSpec from paddle.vision.datasets import MNIST diff --git a/test/auto_parallel/test_fp16_assign.py b/test/auto_parallel/test_fp16_assign.py index eb34226ac89187563e70dff0018ba4064881b953..b1a13d81148f0710b1afcac1a2a7cfd0145b390b 100644 --- a/test/auto_parallel/test_fp16_assign.py +++ b/test/auto_parallel/test_fp16_assign.py @@ -64,9 +64,11 @@ def make_program(): def parallelizer(program_func, rank): - from paddle.distributed.auto_parallel.completion import Completer - from paddle.distributed.auto_parallel.dist_context import DistributedContext - from paddle.distributed.auto_parallel.partitioner import Partitioner + from paddle.distributed.auto_parallel.static.completion import Completer + from paddle.distributed.auto_parallel.static.dist_context import ( + DistributedContext, + ) + from paddle.distributed.auto_parallel.static.partitioner import Partitioner main_program, start_program = program_func() diff --git a/test/auto_parallel/test_group_operators.py b/test/auto_parallel/test_group_operators.py index 6dea719a11180a87d30915a675ec20f0e267832a..aec75934e5e900c782d903ebf1e4590f79a470a8 100644 --- a/test/auto_parallel/test_group_operators.py +++ b/test/auto_parallel/test_group_operators.py @@ -112,10 +112,10 @@ class TestGroupOperators(unittest.TestCase): sequence_len, vocab_size, ) - from paddle.distributed.auto_parallel.dist_context import ( + from paddle.distributed.auto_parallel.static.dist_context import ( DistributedContext, ) - from paddle.distributed.auto_parallel.tuner.rule_based_tuner import ( + from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import ( RuleBasedTuner, ) diff --git a/test/auto_parallel/test_interface.py b/test/auto_parallel/test_interface.py index 3d57049410acf53553e4492d1d0d203d99b94e68..5ea4209a6253b8c19033b07b456408559d20d7ba 100644 --- a/test/auto_parallel/test_interface.py +++ b/test/auto_parallel/test_interface.py @@ -17,10 +17,10 @@ import unittest import paddle import paddle.nn.functional as F from paddle import nn, static -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/test/auto_parallel/test_new_cost_model.py b/test/auto_parallel/test_new_cost_model.py index 8439df7ae88bb239712b98340a04a12de54f15b4..b3e9016e4d20b780b9814ee1ca2e073941cf1e30 100644 --- a/test/auto_parallel/test_new_cost_model.py +++ b/test/auto_parallel/test_new_cost_model.py @@ -20,10 +20,10 @@ import unittest from test_cluster import cluster_json import paddle -import paddle.distributed.auto_parallel.cost as cost_model -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.cost import CommContext -from paddle.distributed.auto_parallel.cost.base_cost 
import ( +import paddle.distributed.auto_parallel.static.cost as cost_model +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.cost import CommContext +from paddle.distributed.auto_parallel.static.cost.base_cost import ( build_comp_desc_from_op, build_comp_desc_str_for_predict, calc_time_by_modeling, diff --git a/test/auto_parallel/test_parallel_tuner.py b/test/auto_parallel/test_parallel_tuner.py index 258bf0c398b2a5760a48baae919f32f3233a4714..76203cbfc9ad9846965dac855a7bfbdeff469abd 100644 --- a/test/auto_parallel/test_parallel_tuner.py +++ b/test/auto_parallel/test_parallel_tuner.py @@ -18,13 +18,15 @@ import unittest import paddle from paddle import static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.dist_context import ( DistributedContext, set_default_distributed_context, ) -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh -from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner +from paddle.distributed.auto_parallel.static.tuner.parallel_tuner import ( + ParallelTuner, +) sys.path.append("../legacy_test") import auto_parallel_gpt_model as modeling diff --git a/test/auto_parallel/test_parallel_tuner_full.py b/test/auto_parallel/test_parallel_tuner_full.py index 7df76ef097e064bb85dcf253e98725759de696f0..181f77b0eb9dde33cad698ffbce39201b77b19e2 100644 --- a/test/auto_parallel/test_parallel_tuner_full.py +++ b/test/auto_parallel/test_parallel_tuner_full.py @@ -18,15 +18,17 @@ import unittest import paddle from paddle import static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.cluster import Cluster +from paddle.distributed.auto_parallel.static.dist_context import ( DistributedContext, set_default_distributed_context, ) -from paddle.distributed.auto_parallel.planner_v2 import Planner -from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.planner_v2 import Planner +from paddle.distributed.auto_parallel.static.tuner.parallel_tuner import ( + ParallelTuner, +) from paddle.distributed.auto_parallel.strategy import Strategy -from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner sys.path.append("../legacy_test") import auto_parallel_gpt_model as modeling diff --git a/test/auto_parallel/test_parallel_tuner_predict.py b/test/auto_parallel/test_parallel_tuner_predict.py index 1e3c6ea87e8f25719d0cda57ace1d5463b8aba32..63b9186c0c8acd119788ecd7105ae1e57e5caa12 100644 --- a/test/auto_parallel/test_parallel_tuner_predict.py +++ b/test/auto_parallel/test_parallel_tuner_predict.py @@ -18,13 +18,15 @@ import unittest import paddle from paddle import static from paddle.distributed import fleet -from paddle.distributed.auto_parallel.cluster import Cluster -from paddle.distributed.auto_parallel.dist_context import ( +from paddle.distributed.auto_parallel.process_mesh import ProcessMesh +from paddle.distributed.auto_parallel.static.cluster import Cluster +from 
paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.tuner.parallel_tuner import ParallelTuner
+from paddle.distributed.auto_parallel.static.tuner.parallel_tuner import (
+    ParallelTuner,
+)
 
 sys.path.append("../legacy_test")
 import auto_parallel_gpt_model as modeling
diff --git a/test/auto_parallel/test_pattern.py b/test/auto_parallel/test_pattern.py
index bdccc68d984fc5d189d7e151754c8d2659d49276..1f7e89c08c52ca8d1f08fbf73c62d0483d4f30d7 100644
--- a/test/auto_parallel/test_pattern.py
+++ b/test/auto_parallel/test_pattern.py
@@ -112,7 +112,7 @@ class TestGroupOperatorsAndPatterns(unittest.TestCase):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             _PATTERNS,
             GraphUtil,
         )
diff --git a/test/auto_parallel/test_pattern_match.py b/test/auto_parallel/test_pattern_match.py
index c240969ef9ddc689e5fdedf001fc994b3d7b6db7..0bbf7af68a0cc78310ab73413e029a65d4aed452 100644
--- a/test/auto_parallel/test_pattern_match.py
+++ b/test/auto_parallel/test_pattern_match.py
@@ -112,10 +112,10 @@ class TestPatternMatch(unittest.TestCase):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             GraphUtil,
             RuleBasedTuner,
         )
diff --git a/test/auto_parallel/test_prim_dist_op.py b/test/auto_parallel/test_prim_dist_op.py
index 5a4a1b5a512a3dfac59682c8f845828ba987c137..b92f550d41fa3036cfef36a30cbf2c4cfbdf2ba8 100644
--- a/test/auto_parallel/test_prim_dist_op.py
+++ b/test/auto_parallel/test_prim_dist_op.py
@@ -15,13 +15,13 @@
 import unittest
 
 import paddle
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.utils import set_var_dist_attr
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.utils import set_var_dist_attr
 from paddle.distributed.fleet import auto
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.incubate.autograd import enable_prim
diff --git a/test/auto_parallel/test_process_mesh.py b/test/auto_parallel/test_process_mesh.py
index 07da754e7970f131a924c37c4a551d9d7b0bc337..d4b91a5dcc345d412da5df056ea85a248cfe1282 100644
--- a/test/auto_parallel/test_process_mesh.py
+++ b/test/auto_parallel/test_process_mesh.py
@@ -19,14 +19,14 @@
 import numpy as np
 import paddle
 import paddle.nn.functional as F
 from paddle import nn, static
-from paddle.distributed.auto_parallel.dist_context import (
-    get_default_distributed_context,
-)
 from paddle.distributed.auto_parallel.process_mesh import (
     ProcessMesh,
     compute_compatible_process_mesh,
     merge_process_meshes,
 )
+from paddle.distributed.auto_parallel.static.dist_context import (
+    get_default_distributed_context,
+)
 
 paddle.enable_static()
diff --git a/test/auto_parallel/test_process_mesh_v2.py b/test/auto_parallel/test_process_mesh_v2.py
index 03ec95c71870f7dd639e28fa21a6b82130824435..0d98caad3a7bf015d07da43b636c72b5ecb2644a 100644
--- a/test/auto_parallel/test_process_mesh_v2.py
+++ b/test/auto_parallel/test_process_mesh_v2.py
@@ -14,7 +14,7 @@
 
 import unittest
 
-from paddle.distributed.auto_parallel.process_mesh_v2 import (
+from paddle.distributed.auto_parallel.static.process_mesh_v2 import (
     ProcessMesh,
     compute_compatible_process_mesh,
     merge_process_mesh,
diff --git a/test/auto_parallel/test_recorder.py b/test/auto_parallel/test_recorder.py
index eaaefcbe0733c28a44f856e908313e59f47a4fa8..185d3d3ef3d5083212ae96b390611fc33038ba2b 100644
--- a/test/auto_parallel/test_recorder.py
+++ b/test/auto_parallel/test_recorder.py
@@ -16,7 +16,7 @@ import unittest
 
 import numpy as np
 
-from paddle.distributed.auto_parallel.tuner import recorder as rd
+from paddle.distributed.auto_parallel.static.tuner import recorder as rd
 
 
 class TestRecorder(unittest.TestCase):
diff --git a/test/auto_parallel/test_rule_based_tuner.py b/test/auto_parallel/test_rule_based_tuner.py
index a3ef694b5c3628e15fc3d874ef16bbe2e6d3db67..7c4c980fd992ebc540d404c92d4bfdfedca49afb 100644
--- a/test/auto_parallel/test_rule_based_tuner.py
+++ b/test/auto_parallel/test_rule_based_tuner.py
@@ -112,11 +112,11 @@ class TestRuleBasedTuner(unittest.TestCase):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.cluster import Cluster
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.cluster import Cluster
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             RuleBasedTuner,
         )
diff --git a/test/auto_parallel/test_rule_based_tuner_o2.py b/test/auto_parallel/test_rule_based_tuner_o2.py
index 999535d72044808e07bdca905197749bed8dc3e1..5fdb1fc83e96afd4cc3a23e8aa07bf6264eb961c 100644
--- a/test/auto_parallel/test_rule_based_tuner_o2.py
+++ b/test/auto_parallel/test_rule_based_tuner_o2.py
@@ -112,11 +112,11 @@ class TestRuleBasedTuner(unittest.TestCase):
             sequence_len,
             vocab_size,
         )
-        from paddle.distributed.auto_parallel.cluster import Cluster
-        from paddle.distributed.auto_parallel.dist_context import (
+        from paddle.distributed.auto_parallel.static.cluster import Cluster
+        from paddle.distributed.auto_parallel.static.dist_context import (
             DistributedContext,
         )
-        from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
+        from paddle.distributed.auto_parallel.static.tuner.rule_based_tuner import (
             RuleBasedTuner,
         )
diff --git a/test/auto_parallel/test_serialization.py b/test/auto_parallel/test_serialization.py
index d89c9596f4cdb01f33c0df8677ff252639971e62..495f3adf62024350f8e2b9122cfb564a0222f5da 100644
--- a/test/auto_parallel/test_serialization.py
+++ b/test/auto_parallel/test_serialization.py
@@ -20,11 +20,11 @@ import paddle
 import paddle.nn.functional as F
 from paddle import nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.dist_context import (
     DistributedContext,
     set_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.process_mesh_v2 import ProcessMesh
+from paddle.distributed.auto_parallel.static.process_mesh_v2 import ProcessMesh
 from paddle.distributed.fleet import auto
 from paddle.fluid.core import TensorDistAttr
 from paddle.fluid.framework import Program
diff --git a/test/auto_parallel/test_to_static.py b/test/auto_parallel/test_to_static.py
index 2057d509ad10534fb4b1647e234df0ca41bfaa71..1550c2d2669f00420b9f07927bf62d2bebc26440 100644
--- a/test/auto_parallel/test_to_static.py
+++ b/test/auto_parallel/test_to_static.py
@@ -19,7 +19,10 @@ import numpy as np
 import paddle
 import paddle.nn.functional as F
 from paddle import LazyGuard, nn
-from paddle.distributed.auto_parallel.helper import ProgramHelper, ProxyLayer
+from paddle.distributed.auto_parallel.static.helper import (
+    ProgramHelper,
+    ProxyLayer,
+)
 from paddle.distributed.fleet import auto
 from paddle.framework import in_dynamic_mode
 from paddle.io import Dataset
diff --git a/test/auto_parallel/test_topology.py b/test/auto_parallel/test_topology.py
index 6807d22ffc3f1588ffe7b3199df222739103b56a..0119821532e2632abd8238882a78ae10553ce74f 100644
--- a/test/auto_parallel/test_topology.py
+++ b/test/auto_parallel/test_topology.py
@@ -14,7 +14,7 @@
 
 import unittest
 
-from paddle.distributed.auto_parallel.topo import SingleNodeTopology
+from paddle.distributed.auto_parallel.static.topo import SingleNodeTopology
 
 
 def check_empty_json_object(json_object):
diff --git a/test/auto_parallel/test_trial.py b/test/auto_parallel/test_trial.py
index 5fcf38b2e65e6cca9d0fafa3438d8a1fa52407f6..7861ab82f8f8872465a36fa930de082ddd617654 100644
--- a/test/auto_parallel/test_trial.py
+++ b/test/auto_parallel/test_trial.py
@@ -14,8 +14,8 @@
 
 import unittest
 
-from paddle.distributed.auto_parallel.tuner import trial as tr
-from paddle.distributed.auto_parallel.tuner import tunable_space as ts
+from paddle.distributed.auto_parallel.static.tuner import trial as tr
+from paddle.distributed.auto_parallel.static.tuner import tunable_space as ts
 
 
 class TestTiral(unittest.TestCase):
diff --git a/test/auto_parallel/test_tunable_space.py b/test/auto_parallel/test_tunable_space.py
index badc90275fd38ad06c2fc1310b87218bdf4aad89..b32e96107b54d43c80714b6d83c38ea979e706a1 100644
--- a/test/auto_parallel/test_tunable_space.py
+++ b/test/auto_parallel/test_tunable_space.py
@@ -14,7 +14,7 @@
 
 import unittest
 
-from paddle.distributed.auto_parallel.tuner import tunable_space as ts
+from paddle.distributed.auto_parallel.static.tuner import tunable_space as ts
 
 
 class TestTunableSpace(unittest.TestCase):
diff --git a/test/auto_parallel/test_tunable_variable.py b/test/auto_parallel/test_tunable_variable.py
index 641f7b4347e36f70681ef176acb6468e70aec972..208ecf7238ffa38ac19488f3dad9593c2a463613 100644
--- a/test/auto_parallel/test_tunable_variable.py
+++ b/test/auto_parallel/test_tunable_variable.py
@@ -14,7 +14,7 @@
 
 import unittest
 
-from paddle.distributed.auto_parallel.tuner import tunable_variable as tv
+from paddle.distributed.auto_parallel.static.tuner import tunable_variable as tv
 
 
 class TestTunableVariable(unittest.TestCase):
diff --git a/test/auto_parallel/test_while_op_completion.py b/test/auto_parallel/test_while_op_completion.py
index 3f9b5b151ab08ef6e9ac064fab2d1eaac5455226..67887916c666252132e0fe96668faa7ee1ab522e 100644
--- a/test/auto_parallel/test_while_op_completion.py
+++ b/test/auto_parallel/test_while_op_completion.py
@@ -20,8 +20,10 @@ import paddle
 import paddle.nn.functional as F
 from paddle import nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
+    DistributedContext,
+)
 from paddle.distributed.fleet import auto
 
 paddle.enable_static()
diff --git a/test/auto_parallel/test_while_op_partition.py b/test/auto_parallel/test_while_op_partition.py
index 00f3a70bbcf42cffb934bc6afdce68a3bf47ee55..ef3189542cb5d0a7ab14c3dbbe3b86874c603fc9 100644
--- a/test/auto_parallel/test_while_op_partition.py
+++ b/test/auto_parallel/test_while_op_partition.py
@@ -20,12 +20,12 @@ import paddle
 import paddle.nn.functional as F
 from paddle import fluid, nn, static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.completion import Completer
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.utils import make_data_unshard
+from paddle.distributed.auto_parallel.static.partitioner import Partitioner
+from paddle.distributed.auto_parallel.static.utils import make_data_unshard
 from paddle.distributed.fleet import auto
 
 paddle.enable_static()
diff --git a/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py b/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
index aa989df70257934cda6b7e623774d1136798263e..33672c3fa7f2113ae69b3f5cca71760d872e94ac 100644
--- a/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
+++ b/test/distributed_passes/test_auto_parallel_data_parallel_optimization_pass.py
@@ -23,10 +23,10 @@ from auto_parallel_pass_test_base import AutoPallelPassTestBase
 
 import paddle
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.dist_context import (
+from paddle.distributed.auto_parallel.static.dist_context import (
     get_default_distributed_context,
 )
-from paddle.distributed.auto_parallel.operators.common import (
+from paddle.distributed.auto_parallel.static.operators.common import (
     is_data_parallel_reduce_op,
 )
 from paddle.distributed.passes import PassContext, new_pass
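
Note on the resulting layout (a brief sketch, not part of the patch itself): the hunks above are mechanical import-path rewrites. Static-graph internals move under the `paddle.distributed.auto_parallel.static` subpackage, while graph-agnostic front-end APIs such as `ProcessMesh` keep their original location. The module and symbol names below are taken verbatim from the hunks above:

# Old path for static-graph internals, as removed by this patch:
#   from paddle.distributed.auto_parallel.dist_context import DistributedContext
# New path after the refactor:
from paddle.distributed.auto_parallel.static.dist_context import (
    DistributedContext,
)

# The user-facing ProcessMesh API is untouched by this refactor:
from paddle.distributed.auto_parallel.process_mesh import ProcessMesh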