Unverified · Commit 12c17448 · Authored by: Yulong Ao · Committed by: GitHub

[Auto Parallel] Change the import way of Auto Parallel (#46115)

Parent: 440b96d3
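The change is mechanical: everywhere docstrings, framework code, and tests previously imported the auto-parallel module with `import paddle.distributed.auto_parallel as auto`, they now import it through the fleet package. A minimal sketch of the new usage, assuming the `ProcessMesh` and `shard_tensor` APIs that appear in the docstring hunks below (the exact `shard_spec` value is illustrative):

import paddle
from paddle.distributed.fleet import auto  # new import path introduced by this commit

# The module contents are unchanged; only the import path moves.
mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
x = paddle.ones([4, 6])
# Shard the first tensor dimension across the "x" mesh dimension.
sharded_x = auto.shard_tensor(x, process_mesh=mesh, shard_spec=["x", None])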
@@ -81,7 +81,7 @@ class Engine:
import paddle
import paddle.vision.transforms as T
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
transform = T.Compose([
@@ -540,7 +540,7 @@ class Engine:
import paddle
import paddle.vision.transforms as T
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
transform = T.Compose([
@@ -663,7 +663,7 @@ class Engine:
import paddle
import paddle.vision.transforms as T
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
transform = T.Compose([
@@ -771,7 +771,7 @@ class Engine:
import paddle
import paddle.vision.transforms as T
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
transform = T.Compose([
@@ -1041,7 +1041,7 @@ class Engine:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
transform = T.Compose([
@@ -1107,7 +1107,7 @@ class Engine:
.. code-block:: python
import paddle
import paddle.vision.transforms as T
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.vision.datasets import MNIST
transform = T.Compose([
@@ -55,7 +55,7 @@ def shard_tensor(x, process_mesh=None, shard_spec=None):
.. code-block:: python
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
x = paddle.ones([4, 6])
@@ -129,7 +129,7 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
.. code-block:: python
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
x = paddle.ones([4, 6])
y = paddle.zeros([4, 6])
@@ -22,7 +22,7 @@ from collections import OrderedDict
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from .cost_model import estimate_cost
from .dist_op import DistributedOperator
from .process_group import _g_process_group_map
@@ -130,7 +130,7 @@ class Strategy(BaseConfig):
.. code-block:: python
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
strategy = auto.Strategy()
sharding = strategy.sharding
@@ -90,3 +90,5 @@ distributed_model = distributed_model
shrink = fleet.shrink
get_hybrid_communicate_group = fleet.get_hybrid_communicate_group
distributed_scaler = distributed_scaler
+
+from .. import auto_parallel as auto
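This appended re-export is what makes the new spelling work: inside the fleet package, the sibling `auto_parallel` package is bound to the name `auto`, so `from paddle.distributed.fleet import auto` resolves to the same module object as the old `import paddle.distributed.auto_parallel as auto`. The same pattern in miniature, with hypothetical package names for illustration only:

# mypkg/auto_parallel.py  (hypothetical)
def shard_tensor(x):
    return x

# mypkg/fleet/__init__.py  (hypothetical)
from .. import auto_parallel as auto  # re-export the sibling module as `auto`

# user code: both spellings reach the same module object
import mypkg.auto_parallel as old_auto
from mypkg.fleet import auto as new_auto
assert old_auto is new_auto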
@@ -18,7 +18,7 @@ import random
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import generate_model, create_data_holder, FakeDataset
@@ -28,7 +28,7 @@ import paddle.utils as utils
from paddle.fluid import layers
from paddle.io import IterableDataset, DataLoader
from paddle.distributed import fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
paddle.enable_static()
_global_parallel_strategy = None
@@ -19,7 +19,7 @@ import sys
import numpy as np
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from auto_parallel_relaunch_model import mlp_pretrain_forward
from auto_parallel_relaunch_model import batch_generator_creator
@@ -18,7 +18,7 @@ import random
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import generate_model, create_data_holder, FakeDataset
@@ -28,7 +28,7 @@ import paddle.utils as utils
from paddle.fluid import layers
from paddle.io import Dataset, IterableDataset, DataLoader
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle.fluid.dataloader.collate import default_collate_fn
@@ -28,7 +28,7 @@ import paddle.utils as utils
from paddle.fluid import layers
from paddle.io import Dataset, DataLoader
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
paddle.enable_static()
batch_size = 2
@@ -17,7 +17,7 @@ import numpy as np
import random
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
sys.path.append("..")
import auto_parallel_gpt_model as modeling
@@ -18,7 +18,7 @@ import random
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import generate_model, create_data_holder, FakeDataset
@@ -16,7 +16,7 @@ import random
import paddle
import unittest
import numpy as np
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.incubate.autograd import Hessian
np.random.seed(1234)
@@ -29,7 +29,7 @@ from paddle.fluid import layers
from paddle.io import Dataset, IterableDataset, DataLoader
from paddle.static import InputSpec
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle.fluid.dataloader.collate import default_collate_fn
@@ -28,7 +28,7 @@ import paddle.utils as utils
from paddle.fluid import layers
from paddle.io import Dataset, IterableDataset, DataLoader
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from engine_api_dp import MyDataset
paddle.enable_static()
@@ -18,7 +18,7 @@ import random
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import generate_model, create_data_holder, FakeDataset
@@ -18,7 +18,7 @@ import random
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import generate_model, create_data_holder, FakeDataset
@@ -24,7 +24,7 @@ import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed import fleet
@@ -25,7 +25,7 @@ import paddle.static as static
import paddle.nn.functional as F
from paddle.distributed import fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -14,7 +14,7 @@
import unittest
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
@@ -14,7 +14,7 @@
import unittest
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
@@ -16,7 +16,7 @@ import unittest
import copy
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.operators.common import get_distributed_operator_impl_container, is_elementwise_op
@@ -14,7 +14,7 @@
import unittest
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
@@ -14,7 +14,7 @@
import unittest
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
@@ -14,7 +14,7 @@
import unittest
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
paddle.enable_static()
@@ -21,7 +21,7 @@ import paddle.nn as nn
import paddle.nn.functional as F
import paddle.static as static
import paddle.distributed as dist
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -20,7 +20,7 @@ import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
import paddle.distributed.fleet as fleet
from paddle.io import Dataset
@@ -18,7 +18,7 @@ import random
import numpy as np
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from get_gpt_model import generate_model, create_data_holder, FakeDataset
paddle.enable_static()
@@ -14,13 +14,13 @@
import unittest
import paddle
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.incubate.autograd import prim2orig, enable_prim, prim_enabled
from paddle.fluid.layer_helper import LayerHelper
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.utils import set_var_dist_attr
@@ -19,7 +19,7 @@ import paddle.fluid as fluid
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.static as static
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -15,7 +15,7 @@
import os
# import yaml
import unittest
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
class TestStrategy(unittest.TestCase):
@@ -20,7 +20,7 @@ import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
import paddle.distributed.fleet as fleet
from paddle import LazyGuard
@@ -19,7 +19,7 @@ import paddle.nn as nn
import paddle.utils as utils
import paddle.static as static
import paddle.nn.functional as F
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed import fleet
from paddle.distributed.auto_parallel.completion import Completer
@@ -20,7 +20,7 @@ import paddle.utils as utils
import paddle.fluid as fluid
import paddle.static as static
import paddle.nn.functional as F
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed import fleet
from paddle.distributed.auto_parallel.completion import Completer
@@ -25,7 +25,7 @@ import paddle.nn as nn
import paddle.utils as utils
import paddle.static as static
import paddle.nn.functional as F
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed import fleet
from paddle.fluid.initializer import NumpyArrayInitializer
@@ -23,7 +23,7 @@ import random
import paddle
import paddle.nn as nn
import paddle.fluid.core as core
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
import paddle.nn.functional as F
from paddle.distributed import fleet
@@ -22,7 +22,7 @@ import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.tensor as tensor
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle import fluid
from paddle.fluid import layers
from paddle.distributed import fleet
@@ -25,7 +25,7 @@ import paddle.nn as nn
import paddle.utils as utils
import paddle.static as static
import paddle.nn.functional as F
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed import fleet
from paddle.fluid.initializer import NumpyArrayInitializer
@@ -23,7 +23,7 @@ import paddle.nn.functional as F
import paddle.utils as utils
from paddle.fluid import layers
from paddle.distributed import fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
import paddle.fluid.core as core
@@ -25,7 +25,7 @@ from collections import OrderedDict
from dist_pass_test_base import DistPassTestBase
import paddle.distributed.fleet as fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
sys.path.append("..")
import auto_parallel_gpt_model as modeling
@@ -20,7 +20,7 @@ import unittest
import paddle
import paddle.nn as nn
import paddle.distributed.fleet as fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
from paddle.distributed.passes import new_pass, PassManager, PassContext
from auto_parallel_pass_test_base import AutoPallelPassTestBase
@@ -26,7 +26,7 @@ import paddle.utils as utils
import paddle.static as static
import paddle.nn.functional as F
import paddle.distributed.fleet as fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.fluid.initializer import NumpyArrayInitializer
from auto_parallel_pass_test_base import AutoPallelPassTestBase
@@ -20,7 +20,7 @@ import unittest
import paddle
import paddle.nn as nn
import paddle.distributed.fleet as fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.passes import new_pass, PassManager
from auto_parallel_pass_test_base import AutoPallelPassTestBase
@@ -20,7 +20,7 @@ import unittest
import paddle
import paddle.nn as nn
import paddle.distributed.fleet as fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.passes import new_pass, PassManager
from auto_parallel_pass_test_base import AutoPallelPassTestBase
@@ -26,7 +26,7 @@ import paddle.utils as utils
import paddle.tensor as tensor
from paddle.fluid import layers
from paddle.nn.layer.transformer import _convert_param_attr_to_list
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.utils import check_distributed_attr_for_program
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -30,7 +30,7 @@ from paddle.nn.layer.transformer import _convert_param_attr_to_list
from paddle.fluid.initializer import Normal, Constant, NumpyArrayInitializer
from paddle.distributed.fleet import fleet
import paddle.static as static
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.utils import check_distributed_attr_for_program
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -22,7 +22,7 @@ import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed import fleet
@@ -17,7 +17,7 @@ import unittest
import paddle
from paddle.fluid import core
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed import fleet
from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
@@ -36,7 +36,7 @@ from paddle.nn.layer.transformer import _convert_param_attr_to_list
from paddle.fluid.initializer import Normal, Constant, NumpyArrayInitializer
from paddle.distributed import fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
@@ -27,7 +27,7 @@ import paddle.utils as utils
import paddle.tensor as tensor
from paddle.fluid import layers
from paddle.nn.layer.transformer import _convert_param_attr_to_list
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.utils import check_distributed_attr_for_program
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -30,7 +30,7 @@ from paddle.nn.layer.transformer import _convert_param_attr_to_list
from paddle.fluid.initializer import Normal, Constant, NumpyArrayInitializer
from paddle.distributed import fleet
import paddle.static as static
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.utils import check_distributed_attr_for_program
from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
@@ -21,7 +21,7 @@ import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed import fleet
@@ -21,7 +21,7 @@ import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed import fleet
@@ -21,7 +21,7 @@ import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed import fleet
@@ -25,7 +25,7 @@ import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
from paddle.distributed import fleet
from paddle.distributed.auto_parallel.partitioner import Partitioner
@@ -25,7 +25,7 @@ import paddle.static as static
import paddle.nn.functional as F
import paddle.utils as utils
from paddle.distributed import fleet
-import paddle.distributed.auto_parallel as auto
+from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.utils import SerialProgramInfo
from paddle.distributed.auto_parallel.planner import PlanSpace, PlanFilter