Unverified commit cfd7ff8f, authored by Nyakku Shigure, committed by GitHub

[CodeStyle][isort] introducing `isort` (part1) (#46475)

* add isort config

* isort all files
Parent 34fd65cf
......@@ -60,6 +60,10 @@ repos:
hooks:
- id: black
files: (.*\.(py|pyi|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
- repo: https://github.com/pycqa/isort
rev: 5.10.1
hooks:
- id: isort
- repo: https://github.com/PyCQA/flake8
rev: 4.0.1
hooks:
......
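With this hook in place, every commit runs isort over the staged Python files. The same result can be reproduced locally without pre-commit; below is a minimal sketch (illustrative only, assuming isort 5.x is installed) that uses isort's Python API, isort.code() and isort.check_code(), with the Black-compatible settings this change configures:

import isort

# Sort a snippet in memory using the same profile the hook applies.
messy = "import sys\nimport numpy as np\nimport os\nimport paddle\n"
tidy = isort.code(
    messy, profile="black", line_length=80, known_first_party=["paddle"]
)
print(tidy)
# Expected grouping: standard library, then third-party, then first-party:
# import os
# import sys
#
# import numpy as np
#
# import paddle

# check_code() returns True once the snippet is already sorted.
assert isort.check_code(
    tidy, profile="black", line_length=80, known_first_party=["paddle"]
)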
......@@ -2,3 +2,30 @@
exclude = "build"
line-length = 80
skip-string-normalization = true
[tool.isort]
profile = "black"
line_length = 80
known_first_party = ["paddle"]
skip = ["build", "__init__.py"]
extend_skip_glob = [
# These files do not need to be formatted,
# see .flake8 for more details
"python/paddle/fluid/[!t]**",
"python/paddle/fluid/tra**",
"*_pb2.py",
"python/paddle/utils/gast/**",
"python/paddle/fluid/tests/unittests/npu/**",
"python/paddle/fluid/tests/unittests/mlu/**",
# These files will be fixed in the future
"cmake/**",
"paddle/**",
"r/**",
"tools/**",
"python/paddle/[!f]**",
"python/paddle/fluid/tests/unittests/[t-z]**",
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py",
"python/paddle/fluid/tests/unittests/dygraph_to_static/**",
"python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py",
]
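These settings make isort reproduce Black's formatting decisions: profile = "black" selects the multi-line output mode and trailing commas Black expects, line_length keeps both tools agreeing on when to wrap, known_first_party ensures paddle imports are grouped after third-party packages such as numpy, and the skip/extend_skip_glob lists exclude generated files and directories that are either intentionally unformatted or deferred to a later part of this series. Concretely, a from-import that exceeds 80 columns is rewritten in the parenthesized, sorted, trailing-comma style that appears throughout the hunks below, for example:

from paddle.distributed.auto_parallel.cost import (
    AllgatherOpCost,
    AllreduceSumOpCost,
    BroadcastOpCost,
    CommContext,
    IdentityOpCost,
    RecvOpCost,
    SendOpCost,
    build_comm_desc,
)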
......@@ -13,14 +13,17 @@
# limitations under the License.
from typing import Sequence
import numpy as np
import paddle
from .tensor.attribute import is_floating_point, is_integer
from .tensor.creation import _real_to_complex_dtype, _complex_to_real_dtype
from .fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from . import _C_ops, _legacy_C_ops
from .fluid.data_feeder import check_variable_and_dtype
from .fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from .fluid.layer_helper import LayerHelper
from .tensor.attribute import is_floating_point, is_integer
from .tensor.creation import _complex_to_real_dtype, _real_to_complex_dtype
__all__ = [
'fft',
......
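Note that isort also sorts the names inside a single from-import, which is why _complex_to_real_dtype now precedes _real_to_complex_dtype in the hunk above. A short sketch of the same behavior via the isort 5.x Python API (illustrative only):

import isort

line = "from .tensor.creation import _real_to_complex_dtype, _complex_to_real_dtype\n"
print(isort.code(line), end="")
# from .tensor.creation import _complex_to_real_dtype, _real_to_complex_dtype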
......@@ -12,14 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
import paddle
import contextlib
import math
import numpy as np
import sys
import os
import sys
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
def convolution_net(
......
......@@ -12,18 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import paddle.static.amp as amp
import contextlib
import numpy
import unittest
import math
import sys
import os
import struct
import sys
import tempfile
import unittest
import numpy
import paddle
import paddle.fluid as fluid
import paddle.static.amp as amp
paddle.enable_static()
......
......@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import contextlib
import math
import sys
import numpy
import unittest
import os
import sys
import tempfile
import unittest
import numpy
import paddle
import paddle.fluid as fluid
paddle.enable_static()
......
......@@ -13,11 +13,12 @@
# limitations under the License.
import contextlib
import numpy as np
import os
import tempfile
import time
import unittest
import tempfile
import numpy as np
import paddle
import paddle.dataset.conll05 as conll05
......
......@@ -13,15 +13,16 @@
# limitations under the License.
import contextlib
import os
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as pd
from paddle.fluid.executor import Executor
import unittest
import os
paddle.enable_static()
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid.core as core
import math
import os
import sys
......@@ -22,6 +21,7 @@ import numpy
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
paddle.enable_static()
......
......@@ -13,15 +13,17 @@
# limitations under the License.
import math
import sys
import os
import sys
import tempfile
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
import tempfile
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
......
......@@ -12,18 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import contextlib
import math
import sys
import os
import unittest
import sys
import tempfile
from paddle.fluid.executor import Executor
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
paddle.enable_static()
......
......@@ -12,14 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
import unittest
import os
import numpy as np
import math
import os
import sys
import tempfile
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
paddle.enable_static()
......
......@@ -13,11 +13,13 @@
# limitations under the License.
import os
from paddle.fluid import core
from distutils.core import Extension, setup
from distutils.sysconfig import get_python_lib
from distutils.core import setup, Extension
from setuptools.command.build_ext import build_ext
from paddle.fluid import core
# refer: https://note.qidong.name/2018/03/setup-warning-strict-prototypes
# Avoid a gcc warning below:
......
......@@ -14,10 +14,12 @@
import os
import site
from paddle.fluid import core
from distutils.core import setup, Extension
from distutils.core import Extension, setup
from setuptools.command.build_ext import build_ext
from paddle.fluid import core
# refer: https://note.qidong.name/2018/03/setup-warning-strict-prototypes
# Avoid a gcc warning below:
......
......@@ -15,6 +15,7 @@
import os
import sys
import unittest
import numpy as np
......
......@@ -13,9 +13,10 @@
# limitations under the License.
import os
import sys
import site
import sys
import unittest
import numpy as np
......
......@@ -13,10 +13,12 @@
# limitations under the License.
import os
from utils import extra_compile_args, paddle_includes
import paddle
import paddle.fluid.core as core
from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup
from utils import paddle_includes, extra_compile_args
if paddle.is_compiled_with_cuda():
sources = ['custom_raw_op_kernel_op.cc', 'custom_raw_op_kernel_op.cu']
......
......@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from utils import paddle_includes, extra_compile_args, IS_MAC
from paddle.utils.cpp_extension import CUDAExtension, setup, CppExtension
from utils import IS_MAC, extra_compile_args, paddle_includes
from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup
# Mac-CI don't support GPU
Extension = CppExtension if IS_MAC else CUDAExtension
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import unittest
import warnings
import paddle.utils.cpp_extension.extension_utils as utils
......
......@@ -14,13 +14,14 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
from paddle.utils.cpp_extension import load, get_build_directory
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.utils.cpp_extension.extension_utils import run_cmd
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,13 +14,14 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
from paddle.utils.cpp_extension import load, get_build_directory
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.utils.cpp_extension.extension_utils import run_cmd
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,14 +14,15 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
import paddle.static as static
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,14 +14,15 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
import paddle.static as static
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,15 +14,16 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
import paddle.static as static
import paddle.nn.functional as F
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
import paddle.static as static
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -12,15 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import paddle
import shlex
import site
import sys
import importlib
import unittest
import numpy as np
import paddle
MODULE_NAME = "custom_raw_op_kernel_op_lib"
......
......@@ -13,18 +13,18 @@
# limitations under the License.
import os
import tempfile
import unittest
import numpy as np
import tempfile
from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
from paddle import nn
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args, IS_MAC
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
file = '{}\\custom_relu_for_model_jit\\custom_relu_for_model_jit.pyd'.format(
......
......@@ -14,13 +14,15 @@
import os
import unittest
import paddle
import numpy as np
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static
from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -13,15 +13,17 @@
# limitations under the License.
import os
import sys
import site
import sys
import unittest
import numpy as np
import paddle
import paddle.static as static
import numpy as np
from paddle.vision.transforms import Compose, Normalize
from paddle.utils.cpp_extension.extension_utils import run_cmd
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension.extension_utils import run_cmd
from paddle.vision.transforms import Compose, Normalize
def custom_relu_dynamic(func, device, dtype, np_x, use_func=True):
......@@ -318,8 +320,7 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
np_data = np.random.random((1, 1, 28, 28)).astype("float32")
np_label = np.random.random((1, 1)).astype("int64")
path_prefix = "custom_op_inference/custom_relu"
from paddle.inference import Config
from paddle.inference import create_predictor
from paddle.inference import Config, create_predictor
for device in self.devices:
predict = custom_relu_static_inference(
......
......@@ -14,13 +14,14 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,14 +14,15 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, extra_nvcc_args, paddle_includes
import paddle
import paddle.fluid as fluid
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args, extra_nvcc_args
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,12 +14,14 @@
import os
import unittest
import paddle
import numpy as np
from paddle.utils.cpp_extension import load, get_build_directory
from utils import paddle_includes, extra_cc_args
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import extra_cc_args, paddle_includes
import paddle
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,14 +14,14 @@
import os
import unittest
import numpy as np
from utils import extra_cc_args, paddle_includes
import paddle
from paddle.utils.cpp_extension import load
from paddle.utils.cpp_extension import load, get_build_directory
from paddle.utils.cpp_extension.extension_utils import run_cmd
from utils import paddle_includes, extra_cc_args
from paddle.fluid.framework import _test_eager_guard
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd
# Because Windows don't use docker, the shared lib already exists in the
# cache dir, it will not be compiled again unless the shared lib is removed.
......
......@@ -14,6 +14,7 @@
import os
import unittest
import paddle
......
......@@ -15,6 +15,7 @@
import os
import sys
from distutils.sysconfig import get_python_lib
from paddle.utils.cpp_extension.extension_utils import IS_WINDOWS
IS_MAC = sys.platform.startswith('darwin')
......
......@@ -12,15 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import unittest
import numpy as np
import paddle
from paddle.fluid import core
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid import core
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.framework import _test_eager_guard
def init_process_group(strategy=None):
......
......@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import copy
import os
import subprocess
import time
import tempfile
import time
import unittest
def start_local_trainers(
......@@ -29,10 +29,10 @@ def start_local_trainers(
log_dir=None,
):
from paddle.distributed.utils.launch_utils import ( # noqa: F401
TrainerProc,
find_free_ports,
watch_local_trainers,
get_cluster,
TrainerProc,
watch_local_trainers,
)
current_env = copy.copy(os.environ.copy())
......@@ -89,10 +89,10 @@ def start_local_trainers(
def get_cluster_from_args(selected_gpus):
from paddle.distributed.utils.launch_utils import ( # noqa: F401
TrainerProc,
find_free_ports,
watch_local_trainers,
get_cluster,
TrainerProc,
watch_local_trainers,
)
cluster_node_ips = '127.0.0.1'
......@@ -117,10 +117,10 @@ def get_cluster_from_args(selected_gpus):
class TestMultipleCustomCPU(unittest.TestCase):
def run_mnist_2custom_cpu(self, target_file_name, eager_mode=True):
from paddle.distributed.utils.launch_utils import ( # noqa: F401
TrainerProc,
find_free_ports,
watch_local_trainers,
get_cluster,
TrainerProc,
watch_local_trainers,
)
selected_devices = [0, 1]
......@@ -179,10 +179,10 @@ class TestProcessGroup(TestMultipleCustomCPU):
def test_process_group_xccl(self):
from paddle.distributed.utils.launch_utils import ( # noqa: F401
TrainerProc,
find_free_ports,
watch_local_trainers,
get_cluster,
TrainerProc,
watch_local_trainers,
)
self.run_mnist_2custom_cpu('process_group_xccl.py')
......
......@@ -14,9 +14,10 @@
import os
import sys
import tempfile
import unittest
import numpy as np
import tempfile
class TestCustomCPUPlugin(unittest.TestCase):
......
......@@ -14,8 +14,8 @@
import os
import sys
import unittest
import tempfile
import unittest
class TestCustomCPUProfilerPlugin(unittest.TestCase):
......
......@@ -14,10 +14,11 @@
import os
import sys
import tempfile
import time
import unittest
import numpy as np
import tempfile
EPOCH_NUM = 1
BATCH_SIZE = 1024
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import unittest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
......
......@@ -12,17 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.layers import detection
from paddle.fluid.framework import Program, program_guard
import unittest
import contextlib
import unittest
import numpy as np
from unittests.test_imperative_base import new_program_scope
from paddle.fluid.dygraph import base
from paddle.fluid import core
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.dygraph import base
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.layers import detection
paddle.enable_static()
......
......@@ -12,19 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.layers.control_flow import (
ConditionalBlock,
merge_lod_tensor,
split_lod_tensor,
)
from paddle.fluid.optimizer import MomentumOptimizer
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import split_lod_tensor
from paddle.fluid.layers.control_flow import merge_lod_tensor
from paddle.fluid.layers.control_flow import ConditionalBlock
import unittest
import numpy as np
paddle.enable_static()
......
......@@ -12,14 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.lod_tensor import (
create_lod_tensor,
create_random_int_lodtensor,
)
import numpy as np
import unittest
class TestLoDTensor(unittest.TestCase):
......
......@@ -16,10 +16,10 @@ import unittest
import numpy as np
import paddle.fluid.layers as layers
import paddle.fluid.framework as framework
import paddle.fluid as fluid
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
paddle.enable_static()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
......
......@@ -13,14 +13,15 @@
# limitations under the License.
import os
from collections import namedtuple
import paddle
import paddle.fluid as fluid
from paddle.fluid import unique_name
import paddle.fluid.core as core
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_optimizers.ascend import ascend_optimizer
from collections import namedtuple
from paddle.fluid import unique_name
from paddle.fluid.layer_helper import LayerHelper
Block = namedtuple('Block', ['program'])
Loss = namedtuple('Loss', ['block'])
......
......@@ -14,11 +14,13 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
paddle.enable_static()
......
......@@ -14,7 +14,9 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
......
......@@ -14,10 +14,12 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
class MyLayer(paddle.nn.Layer):
......
......@@ -14,11 +14,13 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
paddle.enable_static()
......
......@@ -14,6 +14,7 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
......
......@@ -14,11 +14,13 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
paddle.enable_static()
......
......@@ -14,11 +14,13 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
class MyLayer(paddle.nn.Layer):
......
......@@ -13,11 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import unittest
import threading, time
import paddle
import numpy as np
import paddle
class TestASPUtils(unittest.TestCase):
def test_get_check_method(self):
......
......@@ -13,13 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.distributed.fleet as fleet
import os
import unittest
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
import paddle.fluid.core as core
import os
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
if cuda_visible_devices is None or cuda_visible_devices == "":
......
......@@ -13,14 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.distributed.fleet as fleet
import os
import unittest
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
import paddle.fluid as fluid
import os
from paddle.static import sparsity
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
from paddle.static import sparsity
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
if cuda_visible_devices is None or cuda_visible_devices == "":
......
......@@ -13,14 +13,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.distributed.fleet as fleet
import os
import unittest
import numpy as np
import paddle
import paddle.distributed.fleet as fleet
import paddle.fluid as fluid
import os
from paddle.static import sparsity
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
from paddle.static import sparsity
cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
if cuda_visible_devices is None or cuda_visible_devices == "":
......
......@@ -12,15 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
import os
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
from paddle.fluid.framework import program_guard
from paddle.fluid import unique_name
import numpy as np
from paddle.fluid.framework import program_guard
BATCH_NUM = 4
BATCH_SIZE = 1
......
......@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import unittest
import numpy as np
import paddle
from get_gpt_model import FakeDataset, generate_model
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import FakeDataset, generate_model
def apply_pass(use_amp=False, level=None):
......
......@@ -13,10 +13,11 @@
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.static as static
import paddle.utils as utils
from paddle.distributed import fleet
from paddle.distributed.fleet import auto
......
......@@ -12,18 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.static as static
from paddle.distributed import fleet
import sys
import numpy as np
import paddle
import paddle.static as static
from paddle.distributed import fleet
sys.path.append("..")
import auto_parallel_gpt_model as modeling
from auto_parallel_gpt_model import (
GPTModel,
GPTForPretraining,
GPTModel,
GPTPretrainingCriterion,
)
......
......@@ -15,16 +15,18 @@
import paddle
import paddle.static as static
from paddle.distributed import fleet
from paddle.distributed.auto_parallel.cost import CostEstimator
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost import CostEstimator
from paddle.distributed.auto_parallel.dist_context import (
get_default_distributed_context,
)
def train():
from auto_parallel_relaunch_model import mlp_pretrain_forward
from auto_parallel_relaunch_model import batch_generator_creator
from auto_parallel_relaunch_model import (
batch_generator_creator,
mlp_pretrain_forward,
)
dist_strategy = fleet.DistributedStrategy()
# init parallel optimizer
......
......@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import unittest
import numpy as np
import paddle
from get_gpt_model import FakeDataset, generate_model
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import FakeDataset, generate_model
paddle.enable_static()
......
......@@ -12,17 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import os
import tempfile
import numpy as np
import paddle
import paddle.static as static
import paddle.utils as utils
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset
import paddle.static as static
import paddle.utils as utils
from paddle.distributed.fleet import auto
from paddle.io import Dataset
paddle.enable_static()
......
......@@ -12,15 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import os
import tempfile
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset
from paddle.distributed.fleet import auto
from paddle.io import Dataset
paddle.enable_static()
batch_size = 2
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import sys
import numpy as np
import random
import paddle
from paddle.distributed.fleet import auto
......@@ -22,8 +23,8 @@ from paddle.distributed.fleet import auto
sys.path.append("..")
import auto_parallel_gpt_model as modeling
from auto_parallel_gpt_model import (
GPTModel,
GPTForPretraining,
GPTModel,
GPTPretrainingCriterion,
)
......
......@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import unittest
import numpy as np
import paddle
from get_gpt_model import FakeDataset, generate_model
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import FakeDataset, generate_model
paddle.enable_static()
......
......@@ -12,8 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
import paddle
from paddle.distributed.fleet import auto
from paddle.incubate.autograd import Hessian
......
......@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import os
import tempfile
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distributed.fleet import auto
paddle.enable_static()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import os
from paddle.distributed.fleet import launch
from paddle.distributed.fleet.launch_utils import run_with_coverage
......
......@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from engine_api_dp import MyDataset
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distributed.fleet import auto
from engine_api_dp import MyDataset
paddle.enable_static()
batch_size = 16
......
......@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import unittest
import numpy as np
import paddle
from get_gpt_model import FakeDataset, generate_model
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import FakeDataset, generate_model
def apply_pass(use_recompute=False, no_recompute_segments=[]):
......
......@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import unittest
import numpy as np
import paddle
from get_gpt_model import FakeDataset, generate_model
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid.dygraph.parallel import ParallelEnv
from get_gpt_model import FakeDataset, generate_model
paddle.enable_static()
......
......@@ -12,12 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import os
import sys
import json
import os
import subprocess
import sys
import tempfile
import unittest
cluster_json = """
{
......
......@@ -12,38 +12,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import json
import os
import tempfile
import unittest
from test_cluster import cluster_json
import paddle
import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.static as static
import paddle.utils as utils
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed import fleet
from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost.base_cost import (
build_comp_desc_from_dist_op,
)
from paddle.distributed.auto_parallel.cost.base_cost import (
build_comm_desc_from_dist_op,
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.cost import (
AllreduceSumOpCost,
_g_op_cost_factory,
)
from paddle.distributed.auto_parallel.cost.base_cost import (
build_comm_costs_from_descs,
)
from paddle.distributed.auto_parallel.cost.base_cost import (
build_comm_desc_from_dist_op,
build_comp_costs_from_descs,
build_comp_desc_from_dist_op,
build_dp_costs,
)
from paddle.distributed.auto_parallel.cost.base_cost import build_dp_costs
from paddle.distributed.auto_parallel.cost import AllreduceSumOpCost
from paddle.distributed.auto_parallel.cost import _g_op_cost_factory
from test_cluster import cluster_json
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
from paddle.distributed.fleet import auto
paddle.enable_static()
_global_parallel_strategy = "dp_mp_pp"
......
......@@ -12,13 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import unittest
import os
import json
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cluster import get_default_cluster
from paddle.distributed.auto_parallel.cluster import (
Cluster,
get_default_cluster,
)
cluster_json = """
{
......
......@@ -13,9 +13,8 @@
# limitations under the License
import unittest
from paddle.distributed.auto_parallel.cluster_v2 import Device
from paddle.distributed.auto_parallel.cluster_v2 import Link
from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh
from paddle.distributed.auto_parallel.cluster_v2 import Device, DeviceMesh, Link
class TestDeviceMesh(unittest.TestCase):
......
......@@ -12,23 +12,25 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import json
import os
import tempfile
import unittest
from test_cluster import cluster_json, multi_cluster_json
import paddle
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost import CommContext
from paddle.distributed.auto_parallel.cost import build_comm_desc
from paddle.distributed.auto_parallel.cost import AllreduceSumOpCost
from paddle.distributed.auto_parallel.cost import AllgatherOpCost
from paddle.distributed.auto_parallel.cost import BroadcastOpCost
from paddle.distributed.auto_parallel.cost import SendOpCost
from paddle.distributed.auto_parallel.cost import RecvOpCost
from paddle.distributed.auto_parallel.cost import IdentityOpCost
from test_cluster import cluster_json, multi_cluster_json
from paddle.distributed.auto_parallel.cost import (
AllgatherOpCost,
AllreduceSumOpCost,
BroadcastOpCost,
CommContext,
IdentityOpCost,
RecvOpCost,
SendOpCost,
build_comm_desc,
)
class TestCommOpCost(unittest.TestCase):
......
......@@ -12,131 +12,81 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import json
import os
import unittest
from test_cluster import cluster_json
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignValueOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import BeamSearchOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
AssignOpCost,
AssignValueOpCost,
BeamSearchDecodeOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import CastOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ConcatOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ElementwiseAddOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
BeamSearchOpCost,
CastOpCost,
ConcatOpCost,
DropoutGradOpCost,
ElementwiseAddGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ElementwiseDivOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ElementwiseAddOpCost,
ElementwiseDivGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ElementwiseMulOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ElementwiseDivOpCost,
ElementwiseMulGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ElementwiseMulOpCost,
ElementwiseSubOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import EmbeddingOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
EmbeddingGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
FillConstantOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
EmbeddingOpCost,
FillConstantBatchSizeLikeOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import GatherOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
FillConstantOpCost,
FusedSoftmaxMaskUpperTriangleGradOpCost,
FusedSoftmaxMaskUpperTriangleOpCost,
GatherOpCost,
GeluGradOpCost,
GeluOpCost,
GreaterEqualOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import IncrementOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import IsEmptyOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LayerNormOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
IncrementOpCost,
IsEmptyOpCost,
LayerNormGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import LessThanOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalNotOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalAndOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LodResetOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
LookupTableV2OpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
LayerNormOpCost,
LessThanOpCost,
LodResetOpCost,
LogicalAndOpCost,
LogicalNotOpCost,
LogOpCost,
LookupTableV2GradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
LookupTableV2OpCost,
MatmulOpCost,
MatmulV2GradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import MemcpyOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import MulOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import MulGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import OneHotOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
MatmulV2OpCost,
MemcpyOpCost,
MulGradOpCost,
MulOpCost,
OneHotOpCost,
ReadFromArrayOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceSumOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ReduceMeanGradOpCost,
ReduceMeanOpCost,
ReduceSumGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import Reshape2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ReduceSumOpCost,
Reshape2GradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceMeanOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ReduceMeanGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import SamplingIdOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import ScaleOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SliceOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
SoftmaxWithCrossEntropyOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
Reshape2OpCost,
SamplingIdOpCost,
ScaleOpCost,
SliceOpCost,
SoftmaxGradOpCost,
SoftmaxOpCost,
SoftmaxWithCrossEntropyGradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import SplitOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import Squeeze2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SquareOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SquareGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SumOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import TopKOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import Transpose2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
SoftmaxWithCrossEntropyOpCost,
SplitOpCost,
SquareGradOpCost,
SquareOpCost,
Squeeze2OpCost,
SumOpCost,
TopKOpCost,
Transpose2GradOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import Unsqueeze2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
Transpose2OpCost,
Unsqueeze2OpCost,
WriteToArrayOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import DropoutGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
FusedSoftmaxMaskUpperTriangleOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
FusedSoftmaxMaskUpperTriangleGradOpCost,
)
from test_cluster import cluster_json
class TestCompOpCost(unittest.TestCase):
......
......@@ -17,9 +17,8 @@ import unittest
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle.distributed.fleet import auto
from paddle.static import InputSpec
class MLPLayer(nn.Layer):
......
......@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import os
import sys
import subprocess
import sys
import tempfile
import unittest
from paddle.distributed.auto_parallel.converter import Converter
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -38,8 +39,8 @@ def make_program():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -12,31 +12,29 @@
# See the License for the specific language governing permissions and
# limitations under the License
import unittest
import copy
import unittest
import paddle
import numpy as np
import paddle
import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.static as static
from paddle.distributed import fleet
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import (
DistributedContext,
set_default_distributed_context,
)
from paddle.distributed.auto_parallel.process_mesh_v2 import ProcessMesh
from paddle.distributed.auto_parallel.utils import (
_copy_dist_attr_to_cpp,
_copy_dist_attr_from_cpp,
_copy_dist_attr_to_cpp_for_graph,
_copy_dist_attr_from_cpp_for_graph,
_copy_dist_attr_to_cpp,
_copy_dist_attr_to_cpp_for_graph,
)
from paddle.fluid.core import TensorDistAttr
from paddle.fluid.core import OperatorDistAttr
from paddle.distributed.auto_parallel.process_mesh_v2 import ProcessMesh
from paddle.distributed.fleet import auto
from paddle.fluid.core import OperatorDistAttr, TensorDistAttr
paddle.enable_static()
......
......@@ -12,18 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import unittest
import paddle
import numpy as np
import paddle
import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F
import paddle.static as static
from paddle.distributed import fleet
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.fleet import auto
paddle.enable_static()
......
......@@ -13,11 +13,12 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
from test_dist_pnorm import parallelizer
import paddle
from paddle.distributed.fleet import auto
paddle.enable_static()
......
......@@ -13,9 +13,9 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
......@@ -106,8 +106,8 @@ def matmulv2_dp2mp2(init_x, init_y, trans_x, trans_y):
def parallelizer(program_func, *args, **kwargs):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program, loss = program_func(*args, **kwargs)
......
......@@ -12,21 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import unittest
import paddle
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.operators.common import (
get_distributed_operator_impl_container,
is_elementwise_op,
)
from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
from paddle.fluid.backward import append_backward
paddle.enable_static()
......
......@@ -13,9 +13,9 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward
......@@ -50,8 +50,8 @@ def make_program_serial():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program, loss = program_func()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -37,8 +38,8 @@ def make_program_dp2():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -34,8 +35,8 @@ def make_program():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -34,8 +35,8 @@ def make_program():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -56,8 +57,8 @@ def make_program_serial():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -13,6 +13,7 @@
# limitations under the License.
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -34,8 +35,8 @@ def make_program_dp2():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import os
import sys
import subprocess
import sys
import tempfile
import unittest
class TestEngineAPI(unittest.TestCase):
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import os
import sys
import subprocess
import sys
import tempfile
import unittest
class TestEngineAPI(unittest.TestCase):
......
......@@ -13,14 +13,15 @@
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static as static
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset
import paddle.static as static
from paddle.distributed.fleet import auto
from paddle.io import Dataset
paddle.enable_static()
......
......@@ -12,20 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
import random
import shutil
import tempfile
import time
import random
import unittest
import paddle
import paddle.vision.transforms as T
from paddle.static import InputSpec
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.callbacks import config_callbacks
from paddle.vision.models import LeNet
from paddle.distributed.fleet import auto
from paddle.static import InputSpec
from paddle.vision.datasets import MNIST
from paddle.vision.models import LeNet
paddle.enable_static()
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import unittest
import paddle
from paddle.distributed.fleet import auto
......@@ -65,8 +65,8 @@ def make_program():
def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func()
......
......@@ -14,6 +14,7 @@
import sys
import unittest
import numpy as np
import paddle
......@@ -22,8 +23,8 @@ import paddle.static as static
sys.path.append("..")
import auto_parallel_gpt_model as modeling
from auto_parallel_gpt_model import (
GPTModel,
GPTForPretraining,
GPTModel,
GPTPretrainingCriterion,
)
......@@ -111,12 +112,12 @@ class TestGroupOperators(unittest.TestCase):
sequence_len,
vocab_size,
)
from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
RuleBasedTuner,
)
from paddle.distributed.auto_parallel.dist_context import (
DistributedContext,
)
from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
RuleBasedTuner,
)
dist_context = DistributedContext()
tuner = RuleBasedTuner(dist_context)
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import os
import sys
import subprocess
import sys
import tempfile
import unittest
class TestHighOrderGrad(unittest.TestCase):
......
This diff is collapsed.