Unverified commit cfd7ff8f authored by Nyakku Shigure, committed by GitHub

[CodeStyle][isort] introducing `isort` (part1) (#46475)

* add isort config

* isort all files
Parent 34fd65cf
@@ -60,6 +60,10 @@ repos:
   hooks:
   - id: black
     files: (.*\.(py|pyi|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
+- repo: https://github.com/pycqa/isort
+  rev: 5.10.1
+  hooks:
+  - id: isort
 - repo: https://github.com/PyCQA/flake8
   rev: 4.0.1
   hooks:
...
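With this hook registered, isort runs automatically over the staged Python files on every commit. As an illustrative note (not part of the diff itself): the same check can be run by hand with `pre-commit run isort --all-files`, or directly with `isort .` from the repository root, assuming isort 5.10.1 is installed locally to match the pinned rev.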
@@ -2,3 +2,30 @@
 exclude = "build"
 line-length = 80
 skip-string-normalization = true
+
+[tool.isort]
+profile = "black"
+line_length = 80
+known_first_party = ["paddle"]
+skip = ["build", "__init__.py"]
+extend_skip_glob = [
+    # These files do not need to be formatted,
+    # see .flake8 for more details
+    "python/paddle/fluid/[!t]**",
+    "python/paddle/fluid/tra**",
+    "*_pb2.py",
+    "python/paddle/utils/gast/**",
+    "python/paddle/fluid/tests/unittests/npu/**",
+    "python/paddle/fluid/tests/unittests/mlu/**",
+    # These files will be fixed in the future
+    "cmake/**",
+    "paddle/**",
+    "r/**",
+    "tools/**",
+    "python/paddle/[!f]**",
+    "python/paddle/fluid/tests/unittests/[t-z]**",
+    "python/paddle/fluid/tests/unittests/dygraph_to_static/test_error.py",
+    "python/paddle/fluid/tests/unittests/dygraph_to_static/**",
+    "python/paddle/fluid/tests/unittests/ipu/test_dy2static_ipu.py",
+]
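A sketch of what this configuration produces, on a hypothetical module (the imports below are illustrative, not taken from the diff): profile = "black" keeps isort's output compatible with black at 80 columns, and known_first_party = ["paddle"] forces paddle into its own first-party group, so imports are grouped as stdlib, third-party, paddle, then local relative imports, each group alphabetized and separated by one blank line:

    # before isort
    import paddle
    import numpy as np
    import os
    from .helpers import util   # hypothetical local import

    # after isort (stdlib / third-party / first-party / local)
    import os

    import numpy as np

    import paddle

    from .helpers import util

This grouping is exactly the pattern visible in the hunks that follow.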
@@ -13,14 +13,17 @@
 # limitations under the License.

 from typing import Sequence
+
 import numpy as np
+
 import paddle
-from .tensor.attribute import is_floating_point, is_integer
-from .tensor.creation import _real_to_complex_dtype, _complex_to_real_dtype
-from .fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+
 from . import _C_ops, _legacy_C_ops
 from .fluid.data_feeder import check_variable_and_dtype
+from .fluid.framework import _in_legacy_dygraph, in_dygraph_mode
 from .fluid.layer_helper import LayerHelper
+from .tensor.attribute import is_floating_point, is_integer
+from .tensor.creation import _complex_to_real_dtype, _real_to_complex_dtype

 __all__ = [
     'fft',
...
@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import paddle.fluid as fluid
-import paddle
 import contextlib
 import math
-import numpy as np
-import sys
 import os
+import sys
+import unittest
+
+import numpy as np
+
+import paddle
+import paddle.fluid as fluid


 def convolution_net(
...
@@ -12,18 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle
-import paddle.fluid as fluid
-import paddle.static.amp as amp
 import contextlib
-import numpy
-import unittest
 import math
-import sys
 import os
 import struct
+import sys
 import tempfile
+import unittest
+
+import numpy
+
+import paddle
+import paddle.fluid as fluid
+import paddle.static.amp as amp

 paddle.enable_static()
...
@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle
-import paddle.fluid as fluid
 import contextlib
 import math
-import sys
-import numpy
-import unittest
 import os
+import sys
 import tempfile
+import unittest
+
+import numpy
+
+import paddle
+import paddle.fluid as fluid

 paddle.enable_static()
...
@@ -13,11 +13,12 @@
 # limitations under the License.

 import contextlib
-import numpy as np
 import os
+import tempfile
 import time
 import unittest
-import tempfile
+
+import numpy as np

 import paddle
 import paddle.dataset.conll05 as conll05
...
@@ -13,15 +13,16 @@
 # limitations under the License.

 import contextlib
+import os
+import unittest
+
 import numpy as np

 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
 import paddle.fluid.layers as pd
 from paddle.fluid.executor import Executor
-import unittest
-import os

 paddle.enable_static()
...
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle.fluid.core as core
 import math
 import os
 import sys
@@ -22,6 +21,7 @@ import numpy

 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core

 paddle.enable_static()
...
@@ -13,15 +13,17 @@
 # limitations under the License.

 import math
-import sys
 import os
+import sys
+import tempfile
+
 import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
 import paddle.fluid.layers as layers
 import paddle.fluid.nets as nets
-import tempfile
 from paddle.fluid.executor import Executor
 from paddle.fluid.optimizer import SGDOptimizer
...
@@ -12,18 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import numpy as np
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.framework as framework
 import contextlib
 import math
-import sys
 import os
-import unittest
+import sys
 import tempfile
-from paddle.fluid.executor import Executor
+import unittest
+
+import numpy as np
+
 import paddle
+import paddle.fluid as fluid
+import paddle.fluid.framework as framework
+from paddle.fluid.executor import Executor

 paddle.enable_static()
...
@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle
-import paddle.fluid as fluid
-import unittest
-import os
-import numpy as np
 import math
+import os
 import sys
 import tempfile
+import unittest
+
+import numpy as np
+
+import paddle
+import paddle.fluid as fluid

 paddle.enable_static()
...
@@ -13,11 +13,13 @@
 # limitations under the License.

 import os
-from paddle.fluid import core
+from distutils.core import Extension, setup
 from distutils.sysconfig import get_python_lib
-from distutils.core import setup, Extension
+
 from setuptools.command.build_ext import build_ext
+
+from paddle.fluid import core

 # refer: https://note.qidong.name/2018/03/setup-warning-strict-prototypes
 # Avoid a gcc warning below:
...
@@ -14,10 +14,12 @@

 import os
 import site
-from paddle.fluid import core
-from distutils.core import setup, Extension
+from distutils.core import Extension, setup
+
 from setuptools.command.build_ext import build_ext
+
+from paddle.fluid import core

 # refer: https://note.qidong.name/2018/03/setup-warning-strict-prototypes
 # Avoid a gcc warning below:
...
@@ -15,6 +15,7 @@
 import os
 import sys
 import unittest
+
 import numpy as np
...
@@ -13,9 +13,10 @@
 # limitations under the License.

 import os
-import sys
 import site
+import sys
 import unittest
+
 import numpy as np
...
@@ -13,10 +13,12 @@
 # limitations under the License.

 import os
+
+from utils import extra_compile_args, paddle_includes
+
 import paddle
 import paddle.fluid.core as core
 from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup
-from utils import paddle_includes, extra_compile_args

 if paddle.is_compiled_with_cuda():
     sources = ['custom_raw_op_kernel_op.cc', 'custom_raw_op_kernel_op.cu']
...
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from utils import paddle_includes, extra_compile_args, IS_MAC
-from paddle.utils.cpp_extension import CUDAExtension, setup, CppExtension
+from utils import IS_MAC, extra_compile_args, paddle_includes
+
+from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup

 # Mac-CI don't support GPU
 Extension = CppExtension if IS_MAC else CUDAExtension
...
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import os
+import unittest
 import warnings

 import paddle.utils.cpp_extension.extension_utils as utils
...
@@ -14,13 +14,14 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
-from paddle.utils.cpp_extension import load, get_build_directory
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
-from paddle.utils.cpp_extension.extension_utils import run_cmd
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,13 +14,14 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
-from paddle.utils.cpp_extension import load, get_build_directory
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
-from paddle.utils.cpp_extension.extension_utils import run_cmd
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,14 +14,15 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
 import paddle.static as static
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,14 +14,15 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
 import paddle.static as static
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,15 +14,16 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
-import paddle.static as static
 import paddle.nn.functional as F
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
+import paddle.static as static
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -12,15 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import importlib
 import os
-import paddle
 import shlex
 import site
 import sys
-import importlib
 import unittest
+
 import numpy as np
+
+import paddle

 MODULE_NAME = "custom_raw_op_kernel_op_lib"
...
@@ -13,18 +13,18 @@
 # limitations under the License.

 import os
+import tempfile
 import unittest

 import numpy as np
-import tempfile
+from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
 from paddle import nn
-from paddle.utils.cpp_extension import load, get_build_directory
+from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
 from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args, IS_MAC
-from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.

 file = '{}\\custom_relu_for_model_jit\\custom_relu_for_model_jit.pyd'.format(
...
@@ -14,13 +14,15 @@
 import os
 import unittest

-import paddle
 import numpy as np
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
 from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static
+from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
+
+import paddle
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -13,15 +13,17 @@
 # limitations under the License.

 import os
-import sys
 import site
+import sys
 import unittest
+
+import numpy as np

 import paddle
 import paddle.static as static
-import numpy as np
-from paddle.vision.transforms import Compose, Normalize
-from paddle.utils.cpp_extension.extension_utils import run_cmd
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension.extension_utils import run_cmd
+from paddle.vision.transforms import Compose, Normalize


 def custom_relu_dynamic(func, device, dtype, np_x, use_func=True):
@@ -318,8 +320,7 @@ class TestNewCustomOpSetUpInstall(unittest.TestCase):
         np_data = np.random.random((1, 1, 28, 28)).astype("float32")
         np_label = np.random.random((1, 1)).astype("int64")
         path_prefix = "custom_op_inference/custom_relu"
-        from paddle.inference import Config
-        from paddle.inference import create_predictor
+        from paddle.inference import Config, create_predictor

         for device in self.devices:
             predict = custom_relu_static_inference(
...
@@ -14,13 +14,14 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,14 +14,15 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, extra_nvcc_args, paddle_includes

 import paddle
 import paddle.fluid as fluid
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,12 +14,14 @@
 import os
 import unittest

-import paddle
 import numpy as np
-from paddle.utils.cpp_extension import load, get_build_directory
-from utils import paddle_includes, extra_cc_args
-from paddle.utils.cpp_extension.extension_utils import run_cmd
+from utils import extra_cc_args, paddle_includes
+
+import paddle
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,14 +14,14 @@
 import os
 import unittest

 import numpy as np
+from utils import extra_cc_args, paddle_includes

 import paddle
-from paddle.utils.cpp_extension import load
-from paddle.utils.cpp_extension import load, get_build_directory
-from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args
 from paddle.fluid.framework import _test_eager_guard
+from paddle.utils.cpp_extension import get_build_directory, load
+from paddle.utils.cpp_extension.extension_utils import run_cmd

 # Because Windows don't use docker, the shared lib already exists in the
 # cache dir, it will not be compiled again unless the shared lib is removed.
...
@@ -14,6 +14,7 @@
 import os
 import unittest
+
 import paddle
...
@@ -15,6 +15,7 @@
 import os
 import sys
 from distutils.sysconfig import get_python_lib
+
 from paddle.utils.cpp_extension.extension_utils import IS_WINDOWS

 IS_MAC = sys.platform.startswith('darwin')
...
@@ -12,15 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import random
+import unittest
+
 import numpy as np

 import paddle
-from paddle.fluid import core
 import paddle.fluid.core as core
-from paddle.fluid.framework import _test_eager_guard
+from paddle.fluid import core
 from paddle.fluid.dygraph.parallel import ParallelEnv
+from paddle.fluid.framework import _test_eager_guard


 def init_process_group(strategy=None):
...
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import os
 import copy
+import os
 import subprocess
-import time
 import tempfile
+import time
+import unittest


 def start_local_trainers(
@@ -29,10 +29,10 @@ def start_local_trainers(
     log_dir=None,
 ):
     from paddle.distributed.utils.launch_utils import (  # noqa: F401
+        TrainerProc,
         find_free_ports,
-        watch_local_trainers,
         get_cluster,
-        TrainerProc,
+        watch_local_trainers,
     )

     current_env = copy.copy(os.environ.copy())
@@ -89,10 +89,10 @@ def start_local_trainers(

 def get_cluster_from_args(selected_gpus):
     from paddle.distributed.utils.launch_utils import (  # noqa: F401
+        TrainerProc,
         find_free_ports,
-        watch_local_trainers,
         get_cluster,
-        TrainerProc,
+        watch_local_trainers,
     )

     cluster_node_ips = '127.0.0.1'
@@ -117,10 +117,10 @@ def get_cluster_from_args(selected_gpus):

 class TestMultipleCustomCPU(unittest.TestCase):
     def run_mnist_2custom_cpu(self, target_file_name, eager_mode=True):
         from paddle.distributed.utils.launch_utils import (  # noqa: F401
+            TrainerProc,
             find_free_ports,
-            watch_local_trainers,
             get_cluster,
-            TrainerProc,
+            watch_local_trainers,
         )

         selected_devices = [0, 1]
@@ -179,10 +179,10 @@ class TestProcessGroup(TestMultipleCustomCPU):
     def test_process_group_xccl(self):
         from paddle.distributed.utils.launch_utils import (  # noqa: F401
+            TrainerProc,
             find_free_ports,
-            watch_local_trainers,
             get_cluster,
-            TrainerProc,
+            watch_local_trainers,
         )

         self.run_mnist_2custom_cpu('process_group_xccl.py')
...
@@ -14,9 +14,10 @@
 import os
 import sys
+import tempfile
 import unittest
+
 import numpy as np
-import tempfile


 class TestCustomCPUPlugin(unittest.TestCase):
...
@@ -14,8 +14,8 @@
 import os
 import sys
-import unittest
 import tempfile
+import unittest


 class TestCustomCPUProfilerPlugin(unittest.TestCase):
...
@@ -14,10 +14,11 @@
 import os
 import sys
+import tempfile
 import time
 import unittest
+
 import numpy as np
-import tempfile

 EPOCH_NUM = 1
 BATCH_SIZE = 1024
...
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle.fluid as fluid
 import unittest
+
 import paddle
+import paddle.fluid as fluid

 paddle.enable_static()
...
@@ -12,17 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle.fluid as fluid
-import paddle.fluid.layers as layers
-from paddle.fluid.layers import detection
-from paddle.fluid.framework import Program, program_guard
-import unittest
 import contextlib
+import unittest
+
 import numpy as np
 from unittests.test_imperative_base import new_program_scope
-from paddle.fluid.dygraph import base
-from paddle.fluid import core
+
 import paddle
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+from paddle.fluid import core
+from paddle.fluid.dygraph import base
+from paddle.fluid.framework import Program, program_guard
+from paddle.fluid.layers import detection

 paddle.enable_static()
...
@@ -12,19 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import unittest
+
+import numpy as np
+
 import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
 import paddle.fluid.layers as layers
-from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.executor import Executor
+from paddle.fluid.framework import Program, program_guard
+from paddle.fluid.layers.control_flow import (
+    ConditionalBlock,
+    merge_lod_tensor,
+    split_lod_tensor,
+)
 from paddle.fluid.optimizer import MomentumOptimizer
-import paddle.fluid.core as core
-import paddle.fluid as fluid
-from paddle.fluid.layers.control_flow import split_lod_tensor
-from paddle.fluid.layers.control_flow import merge_lod_tensor
-from paddle.fluid.layers.control_flow import ConditionalBlock
-import unittest
-import numpy as np

 paddle.enable_static()
...
@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import unittest
+
+import numpy as np
+
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.lod_tensor import (
     create_lod_tensor,
     create_random_int_lodtensor,
 )
-import numpy as np
-import unittest


 class TestLoDTensor(unittest.TestCase):
...
@@ -16,10 +16,10 @@ import unittest

 import numpy as np

-import paddle.fluid.layers as layers
-import paddle.fluid.framework as framework
-import paddle.fluid as fluid
 import paddle
+import paddle.fluid as fluid
+import paddle.fluid.framework as framework
+import paddle.fluid.layers as layers

 paddle.enable_static()
...
@@ -13,6 +13,7 @@
 # limitations under the License.

 import unittest
+
 import paddle
...
@@ -13,14 +13,15 @@
 # limitations under the License.

 import os
+from collections import namedtuple
+
+import paddle
 import paddle.fluid as fluid
-from paddle.fluid import unique_name
 import paddle.fluid.core as core
-import paddle
-from paddle.fluid.layer_helper import LayerHelper
 from paddle.distributed import fleet
 from paddle.distributed.fleet.meta_optimizers.ascend import ascend_optimizer
-from collections import namedtuple
+from paddle.fluid import unique_name
+from paddle.fluid.layer_helper import LayerHelper

 Block = namedtuple('Block', ['program'])
 Loss = namedtuple('Loss', ['block'])
...
@@ -14,11 +14,13 @@
 # limitations under the License.

 import unittest
+
+import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np

 paddle.enable_static()
...
@@ -14,7 +14,9 @@
 # limitations under the License.

 import unittest
+
 import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
...
@@ -14,10 +14,12 @@
 # limitations under the License.

 import unittest
+
+import numpy as np
+
 import paddle
 import paddle.fluid.core as core
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np


 class MyLayer(paddle.nn.Layer):
...
@@ -14,11 +14,13 @@
 # limitations under the License.

 import unittest
+
+import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np

 paddle.enable_static()
...
@@ -14,6 +14,7 @@
 # limitations under the License.

 import unittest
+
 import numpy as np

 import paddle
...
@@ -14,11 +14,13 @@
 # limitations under the License.

 import unittest
+
+import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np

 paddle.enable_static()
...
@@ -14,11 +14,13 @@
 # limitations under the License.

 import unittest
+
+import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np


 class MyLayer(paddle.nn.Layer):
...
@@ -13,11 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import threading
+import time
 import unittest
-import threading, time
-import paddle
+
 import numpy as np
+
+import paddle


 class TestASPUtils(unittest.TestCase):
     def test_get_check_method(self):
...
@@ -13,13 +13,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle.distributed.fleet as fleet
+import os
 import unittest
+
+import numpy as np
+
 import paddle
+import paddle.distributed.fleet as fleet
 import paddle.fluid.core as core
-import os
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np

 cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
 if cuda_visible_devices is None or cuda_visible_devices == "":
...
@@ -13,14 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle.distributed.fleet as fleet
+import os
 import unittest
+
+import numpy as np
+
 import paddle
+import paddle.distributed.fleet as fleet
 import paddle.fluid as fluid
-import os
-from paddle.static import sparsity
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np
+from paddle.static import sparsity

 cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
 if cuda_visible_devices is None or cuda_visible_devices == "":
...
@@ -13,14 +13,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle.distributed.fleet as fleet
+import os
 import unittest
+
+import numpy as np
+
 import paddle
+import paddle.distributed.fleet as fleet
 import paddle.fluid as fluid
-import os
-from paddle.static import sparsity
 from paddle.fluid.contrib.sparsity.asp import ASPHelper
-import numpy as np
+from paddle.static import sparsity

 cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
 if cuda_visible_devices is None or cuda_visible_devices == "":
...
@@ -12,15 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import paddle.fluid as fluid
 import os
+import unittest
+
+import numpy as np
+
+import paddle.fluid as fluid
 import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
-from paddle.fluid.framework import program_guard
 from paddle.fluid import unique_name
-import numpy as np
+from paddle.fluid.framework import program_guard

 BATCH_NUM = 4
 BATCH_SIZE = 1
...
@@ -12,14 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import random
+import unittest

 import numpy as np
-import paddle
+from get_gpt_model import FakeDataset, generate_model
+
+import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import FakeDataset, generate_model


 def apply_pass(use_amp=False, level=None):
...
@@ -13,10 +13,11 @@
 # limitations under the License.

 import numpy as np
+
 import paddle
 import paddle.nn as nn
-import paddle.static as static
 import paddle.nn.functional as F
+import paddle.static as static
 import paddle.utils as utils
 from paddle.distributed import fleet
 from paddle.distributed.fleet import auto
...
@@ -12,18 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle
-import paddle.static as static
-from paddle.distributed import fleet
 import sys
+
 import numpy as np
+
+import paddle
+import paddle.static as static
+from paddle.distributed import fleet

 sys.path.append("..")
 import auto_parallel_gpt_model as modeling
 from auto_parallel_gpt_model import (
-    GPTModel,
     GPTForPretraining,
+    GPTModel,
     GPTPretrainingCriterion,
 )
...
@@ -15,16 +15,18 @@
 import paddle
 import paddle.static as static
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.cost import CostEstimator
 from paddle.distributed.auto_parallel.cluster import Cluster
+from paddle.distributed.auto_parallel.cost import CostEstimator
 from paddle.distributed.auto_parallel.dist_context import (
     get_default_distributed_context,
 )


 def train():
-    from auto_parallel_relaunch_model import mlp_pretrain_forward
-    from auto_parallel_relaunch_model import batch_generator_creator
+    from auto_parallel_relaunch_model import (
+        batch_generator_creator,
+        mlp_pretrain_forward,
+    )

     dist_strategy = fleet.DistributedStrategy()
     # init parallel optimizer
...
@@ -12,14 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import random
+import unittest

 import numpy as np
-import paddle
+from get_gpt_model import FakeDataset, generate_model
+
+import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import FakeDataset, generate_model

 paddle.enable_static()
...
@@ -12,17 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import tempfile
 import os
+import tempfile
+
 import numpy as np
+
 import paddle
-import paddle.static as static
-import paddle.utils as utils
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.io import Dataset
+import paddle.static as static
+import paddle.utils as utils
 from paddle.distributed.fleet import auto
+from paddle.io import Dataset

 paddle.enable_static()
...
@@ -12,15 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import tempfile
 import os
+import tempfile
+
 import numpy as np

 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
-from paddle.io import Dataset
 from paddle.distributed.fleet import auto
+from paddle.io import Dataset

 paddle.enable_static()

 batch_size = 2
...
@@ -12,9 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import random
 import sys
+
 import numpy as np
-import random

 import paddle
 from paddle.distributed.fleet import auto
@@ -22,8 +23,8 @@ from paddle.distributed.fleet import auto
 sys.path.append("..")
 import auto_parallel_gpt_model as modeling
 from auto_parallel_gpt_model import (
-    GPTModel,
     GPTForPretraining,
+    GPTModel,
     GPTPretrainingCriterion,
 )
...
@@ -12,14 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import random
+import unittest

 import numpy as np
-import paddle
+from get_gpt_model import FakeDataset, generate_model
+
+import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import FakeDataset, generate_model

 paddle.enable_static()
...
@@ -12,8 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import paddle
 import numpy as np
+
+import paddle
 from paddle.distributed.fleet import auto
 from paddle.incubate.autograd import Hessian
...
@@ -12,13 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import tempfile
 import os
+import tempfile
+
 import numpy as np

 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle.distributed.fleet import auto

 paddle.enable_static()
...
@@ -13,6 +13,7 @@
 # limitations under the License.

 import os
+
 from paddle.distributed.fleet import launch
 from paddle.distributed.fleet.launch_utils import run_with_coverage
...
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from engine_api_dp import MyDataset
+
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle.distributed.fleet import auto
-from engine_api_dp import MyDataset

 paddle.enable_static()

 batch_size = 16
...
@@ -12,14 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import random
+import unittest

 import numpy as np
-import paddle
+from get_gpt_model import FakeDataset, generate_model
+
+import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import FakeDataset, generate_model


 def apply_pass(use_recompute=False, no_recompute_segments=[]):
...
@@ -12,14 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
 import random
+import unittest

 import numpy as np
-import paddle
+from get_gpt_model import FakeDataset, generate_model
+
+import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import FakeDataset, generate_model

 paddle.enable_static()
...
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import tempfile
-import unittest
-import os
-import sys
 import json
+import os
 import subprocess
+import sys
+import tempfile
+import unittest

 cluster_json = """
 {
...
@@ -12,38 +12,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import os
 import json
+import os
 import tempfile
+import unittest
+
+from test_cluster import cluster_json

 import paddle
 import paddle.nn as nn
-import paddle.static as static
 import paddle.nn.functional as F
+import paddle.static as static
 import paddle.utils as utils
-from paddle.distributed.fleet import auto
-from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.dist_context import DistributedContext
 from paddle.distributed import fleet
-from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
 from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost.base_cost import (
-    build_comp_desc_from_dist_op,
-)
-from paddle.distributed.auto_parallel.cost.base_cost import (
-    build_comm_desc_from_dist_op,
+from paddle.distributed.auto_parallel.completion import Completer
+from paddle.distributed.auto_parallel.cost import (
+    AllreduceSumOpCost,
+    _g_op_cost_factory,
 )
 from paddle.distributed.auto_parallel.cost.base_cost import (
     build_comm_costs_from_descs,
-)
-from paddle.distributed.auto_parallel.cost.base_cost import (
+    build_comm_desc_from_dist_op,
     build_comp_costs_from_descs,
+    build_comp_desc_from_dist_op,
+    build_dp_costs,
 )
-from paddle.distributed.auto_parallel.cost.base_cost import build_dp_costs
-from paddle.distributed.auto_parallel.cost import AllreduceSumOpCost
-from paddle.distributed.auto_parallel.cost import _g_op_cost_factory
-
-from test_cluster import cluster_json
+from paddle.distributed.auto_parallel.dist_context import DistributedContext
+from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
+from paddle.distributed.fleet import auto

 paddle.enable_static()
 _global_parallel_strategy = "dp_mp_pp"
...
@@ -12,13 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import json
+import os
 import tempfile
 import unittest
-import os
-import json

-from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cluster import get_default_cluster
+from paddle.distributed.auto_parallel.cluster import (
+    Cluster,
+    get_default_cluster,
+)

 cluster_json = """
 {
...
@@ -13,9 +13,8 @@
 # limitations under the License

 import unittest
-from paddle.distributed.auto_parallel.cluster_v2 import Device
-from paddle.distributed.auto_parallel.cluster_v2 import Link
-from paddle.distributed.auto_parallel.cluster_v2 import DeviceMesh
+
+from paddle.distributed.auto_parallel.cluster_v2 import Device, DeviceMesh, Link


 class TestDeviceMesh(unittest.TestCase):
...
@@ -12,23 +12,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import os
 import json
+import os
 import tempfile
+import unittest
+
+from test_cluster import cluster_json, multi_cluster_json

 import paddle
 from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost import CommContext
-from paddle.distributed.auto_parallel.cost import build_comm_desc
-from paddle.distributed.auto_parallel.cost import AllreduceSumOpCost
-from paddle.distributed.auto_parallel.cost import AllgatherOpCost
-from paddle.distributed.auto_parallel.cost import BroadcastOpCost
-from paddle.distributed.auto_parallel.cost import SendOpCost
-from paddle.distributed.auto_parallel.cost import RecvOpCost
-from paddle.distributed.auto_parallel.cost import IdentityOpCost
-
-from test_cluster import cluster_json, multi_cluster_json
+from paddle.distributed.auto_parallel.cost import (
+    AllgatherOpCost,
+    AllreduceSumOpCost,
+    BroadcastOpCost,
+    CommContext,
+    IdentityOpCost,
+    RecvOpCost,
+    SendOpCost,
+    build_comm_desc,
+)


 class TestCommOpCost(unittest.TestCase):
...
@@ -12,131 +12,81 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import os
 import json
+import os
+import unittest
+
+from test_cluster import cluster_json

 from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignOpCost
-from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignValueOpCost
-from paddle.distributed.auto_parallel.cost.comp_op_cost import BeamSearchOpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+    AssignOpCost,
+    AssignValueOpCost,
     BeamSearchDecodeOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import CastOpCost
-from paddle.distributed.auto_parallel.cost.comp_op_cost import ConcatOpCost
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
-    ElementwiseAddOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+    BeamSearchOpCost,
+    CastOpCost,
+    ConcatOpCost,
+    DropoutGradOpCost,
     ElementwiseAddGradOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
-    ElementwiseDivOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+    ElementwiseAddOpCost,
     ElementwiseDivGradOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
-    ElementwiseMulOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+    ElementwiseDivOpCost,
     ElementwiseMulGradOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+    ElementwiseMulOpCost,
     ElementwiseSubOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import EmbeddingOpCost
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
     EmbeddingGradOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
-    FillConstantOpCost,
-)
-from paddle.distributed.auto_parallel.cost.comp_op_cost import (
+    EmbeddingOpCost,
     FillConstantBatchSizeLikeOpCost,
-)
+    FillConstantOpCost,
+    FusedSoftmaxMaskUpperTriangleGradOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluOpCost FusedSoftmaxMaskUpperTriangleOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import GeluGradOpCost GatherOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import ( GeluGradOpCost,
GeluOpCost,
GreaterEqualOpCost, GreaterEqualOpCost,
) IncrementOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import IncrementOpCost IsEmptyOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import IsEmptyOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import LayerNormOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
LayerNormGradOpCost, LayerNormGradOpCost,
) LayerNormOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import LessThanOpCost LessThanOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalNotOpCost LodResetOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogicalAndOpCost LogicalAndOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import LodResetOpCost LogicalNotOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import LogOpCost LogOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
LookupTableV2OpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
LookupTableV2GradOpCost, LookupTableV2GradOpCost,
) LookupTableV2OpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulOpCost MatmulOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
MatmulV2GradOpCost, MatmulV2GradOpCost,
) MatmulV2OpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import MemcpyOpCost MemcpyOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import MulOpCost MulGradOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import MulGradOpCost MulOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import OneHotOpCost OneHotOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ReadFromArrayOpCost, ReadFromArrayOpCost,
) ReduceMeanGradOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceSumOpCost ReduceMeanOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
ReduceSumGradOpCost, ReduceSumGradOpCost,
) ReduceSumOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import Reshape2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
Reshape2GradOpCost, Reshape2GradOpCost,
) Reshape2OpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import ReduceMeanOpCost SamplingIdOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import ( ScaleOpCost,
ReduceMeanGradOpCost, SliceOpCost,
) SoftmaxGradOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import SamplingIdOpCost SoftmaxOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import ScaleOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SliceOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import SoftmaxGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
SoftmaxWithCrossEntropyOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
SoftmaxWithCrossEntropyGradOpCost, SoftmaxWithCrossEntropyGradOpCost,
) SoftmaxWithCrossEntropyOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import SplitOpCost SplitOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import Squeeze2OpCost SquareGradOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import SquareOpCost SquareOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import SquareGradOpCost Squeeze2OpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import SumOpCost SumOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import TopKOpCost TopKOpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import Transpose2OpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
Transpose2GradOpCost, Transpose2GradOpCost,
) Transpose2OpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import Unsqueeze2OpCost Unsqueeze2OpCost,
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
WriteToArrayOpCost, WriteToArrayOpCost,
) )
from paddle.distributed.auto_parallel.cost.comp_op_cost import DropoutGradOpCost
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
FusedSoftmaxMaskUpperTriangleOpCost,
)
from paddle.distributed.auto_parallel.cost.comp_op_cost import (
FusedSoftmaxMaskUpperTriangleGradOpCost,
)
from test_cluster import cluster_json
class TestCompOpCost(unittest.TestCase): class TestCompOpCost(unittest.TestCase):
......
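Consolidations as large as the comp_op_cost block above were produced by rewriting files on disk, not strings. A sketch of the per-file API; the path is illustrative, and in isort 5 isort.file sorts the file in place and should return a bool saying whether it changed:

import isort

changed = isort.file(
    "test_comp_cost.py",  # illustrative path
    profile="black",
    line_length=80,
    known_first_party=["paddle"],
)
print(changed)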
...@@ -17,9 +17,8 @@ import unittest ...@@ -17,9 +17,8 @@ import unittest
import paddle import paddle
import paddle.nn as nn import paddle.nn as nn
import paddle.nn.functional as F import paddle.nn.functional as F
from paddle.static import InputSpec
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
from paddle.static import InputSpec
class MLPLayer(nn.Layer): class MLPLayer(nn.Layer):
......
...@@ -12,11 +12,12 @@ ...@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import tempfile
import unittest
import os import os
import sys
import subprocess import subprocess
import sys
import tempfile
import unittest
from paddle.distributed.auto_parallel.converter import Converter from paddle.distributed.auto_parallel.converter import Converter
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -38,8 +39,8 @@ def make_program(): ...@@ -38,8 +39,8 @@ def make_program():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
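The reordering is not limited to module top level: in the parallelizer hunk above, the imports inside the function body are alphabetized as well. A small sketch (illustrative pkg names):

import isort

func = (
    "def parallelizer():\n"
    "    from pkg.completion import Completer\n"
    "    from pkg.partitioner import Partitioner\n"
    "    from pkg.dist_context import DistributedContext\n"
)
print(isort.code(func, profile="black"))
# The indented block should come back alphabetized:
#     from pkg.completion import Completer
#     from pkg.dist_context import DistributedContext
#     from pkg.partitioner import Partitioner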
...@@ -12,31 +12,29 @@ ...@@ -12,31 +12,29 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License # limitations under the License
import unittest
import copy import copy
import unittest
import paddle
import numpy as np import numpy as np
import paddle
import paddle.nn as nn import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F import paddle.nn.functional as F
import paddle.static as static
from paddle.distributed import fleet from paddle.distributed import fleet
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import ( from paddle.distributed.auto_parallel.dist_context import (
DistributedContext, DistributedContext,
set_default_distributed_context, set_default_distributed_context,
) )
from paddle.distributed.auto_parallel.process_mesh_v2 import ProcessMesh
from paddle.distributed.auto_parallel.utils import ( from paddle.distributed.auto_parallel.utils import (
_copy_dist_attr_to_cpp,
_copy_dist_attr_from_cpp, _copy_dist_attr_from_cpp,
_copy_dist_attr_to_cpp_for_graph,
_copy_dist_attr_from_cpp_for_graph, _copy_dist_attr_from_cpp_for_graph,
_copy_dist_attr_to_cpp,
_copy_dist_attr_to_cpp_for_graph,
) )
from paddle.distributed.fleet import auto
from paddle.fluid.core import TensorDistAttr from paddle.fluid.core import OperatorDistAttr, TensorDistAttr
from paddle.fluid.core import OperatorDistAttr
from paddle.distributed.auto_parallel.process_mesh_v2 import ProcessMesh
paddle.enable_static() paddle.enable_static()
......
...@@ -12,18 +12,18 @@ ...@@ -12,18 +12,18 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import unittest
import copy import copy
import unittest
import paddle
import numpy as np import numpy as np
import paddle
import paddle.nn as nn import paddle.nn as nn
import paddle.static as static
import paddle.nn.functional as F import paddle.nn.functional as F
import paddle.static as static
from paddle.distributed import fleet from paddle.distributed import fleet
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.fleet import auto
paddle.enable_static() paddle.enable_static()
......
...@@ -13,11 +13,12 @@ ...@@ -13,11 +13,12 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle
from paddle.distributed.fleet import auto
from test_dist_pnorm import parallelizer from test_dist_pnorm import parallelizer
import paddle
from paddle.distributed.fleet import auto
paddle.enable_static() paddle.enable_static()
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
from paddle.fluid import program_guard from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
...@@ -106,8 +106,8 @@ def matmulv2_dp2mp2(init_x, init_y, trans_x, trans_y): ...@@ -106,8 +106,8 @@ def matmulv2_dp2mp2(init_x, init_y, trans_x, trans_y):
def parallelizer(program_func, *args, **kwargs): def parallelizer(program_func, *args, **kwargs):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program, loss = program_func(*args, **kwargs) main_program, start_program, loss = program_func(*args, **kwargs)
......
...@@ -12,21 +12,18 @@ ...@@ -12,21 +12,18 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import unittest
import copy import copy
import unittest
import paddle import paddle
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.cluster import Cluster from paddle.distributed.auto_parallel.cluster import Cluster
from paddle.distributed.auto_parallel.operators.common import ( from paddle.distributed.auto_parallel.operators.common import (
get_distributed_operator_impl_container, get_distributed_operator_impl_container,
is_elementwise_op, is_elementwise_op,
) )
from paddle.distributed.fleet import auto
from paddle.fluid import program_guard from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
from paddle.fluid.backward import append_backward
paddle.enable_static() paddle.enable_static()
......
...@@ -13,9 +13,9 @@ ...@@ -13,9 +13,9 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
from paddle.fluid import program_guard from paddle.fluid import program_guard
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
...@@ -50,8 +50,8 @@ def make_program_serial(): ...@@ -50,8 +50,8 @@ def make_program_serial():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program, loss = program_func() main_program, start_program, loss = program_func()
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -37,8 +38,8 @@ def make_program_dp2(): ...@@ -37,8 +38,8 @@ def make_program_dp2():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -34,8 +35,8 @@ def make_program(): ...@@ -34,8 +35,8 @@ def make_program():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -34,8 +35,8 @@ def make_program(): ...@@ -34,8 +35,8 @@ def make_program():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -56,8 +57,8 @@ def make_program_serial(): ...@@ -56,8 +57,8 @@ def make_program_serial():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -34,8 +35,8 @@ def make_program_dp2(): ...@@ -34,8 +35,8 @@ def make_program_dp2():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
...@@ -12,11 +12,11 @@ ...@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import tempfile
import unittest
import os import os
import sys
import subprocess import subprocess
import sys
import tempfile
import unittest
class TestEngineAPI(unittest.TestCase): class TestEngineAPI(unittest.TestCase):
......
...@@ -12,11 +12,11 @@ ...@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import tempfile
import unittest
import os import os
import sys
import subprocess import subprocess
import sys
import tempfile
import unittest
class TestEngineAPI(unittest.TestCase): class TestEngineAPI(unittest.TestCase):
......
...@@ -13,14 +13,15 @@ ...@@ -13,14 +13,15 @@
# limitations under the License. # limitations under the License.
import unittest import unittest
import numpy as np import numpy as np
import paddle import paddle
import paddle.static as static
import paddle.nn as nn import paddle.nn as nn
import paddle.nn.functional as F import paddle.nn.functional as F
from paddle.io import Dataset import paddle.static as static
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
from paddle.io import Dataset
paddle.enable_static() paddle.enable_static()
......
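The blank-line grouping seen throughout these hunks is isort's section ordering: standard library first, then third-party, then first-party, with paddle treated as first-party via known_first_party. A sketch:

import isort

snippet = "import paddle\nimport numpy as np\nimport unittest\n"
print(isort.code(snippet, profile="black", known_first_party=["paddle"]))
# Expected grouping, one blank line between sections:
# import unittest
#
# import numpy as np
#
# import paddle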
...@@ -12,20 +12,19 @@ ...@@ -12,20 +12,19 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import unittest import random
import tempfile
import shutil import shutil
import tempfile
import time import time
import random import unittest
import paddle import paddle
import paddle.vision.transforms as T import paddle.vision.transforms as T
from paddle.static import InputSpec
from paddle.distributed.fleet import auto
from paddle.distributed.auto_parallel.callbacks import config_callbacks from paddle.distributed.auto_parallel.callbacks import config_callbacks
from paddle.vision.models import LeNet from paddle.distributed.fleet import auto
from paddle.static import InputSpec
from paddle.vision.datasets import MNIST from paddle.vision.datasets import MNIST
from paddle.vision.models import LeNet
paddle.enable_static() paddle.enable_static()
......
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import unittest
import copy import copy
import unittest
import paddle import paddle
from paddle.distributed.fleet import auto from paddle.distributed.fleet import auto
...@@ -65,8 +65,8 @@ def make_program(): ...@@ -65,8 +65,8 @@ def make_program():
def parallelizer(program_func, rank): def parallelizer(program_func, rank):
from paddle.distributed.auto_parallel.completion import Completer from paddle.distributed.auto_parallel.completion import Completer
from paddle.distributed.auto_parallel.partitioner import Partitioner
from paddle.distributed.auto_parallel.dist_context import DistributedContext from paddle.distributed.auto_parallel.dist_context import DistributedContext
from paddle.distributed.auto_parallel.partitioner import Partitioner
main_program, start_program = program_func() main_program, start_program = program_func()
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
import sys import sys
import unittest import unittest
import numpy as np import numpy as np
import paddle import paddle
...@@ -22,8 +23,8 @@ import paddle.static as static ...@@ -22,8 +23,8 @@ import paddle.static as static
sys.path.append("..") sys.path.append("..")
import auto_parallel_gpt_model as modeling import auto_parallel_gpt_model as modeling
from auto_parallel_gpt_model import ( from auto_parallel_gpt_model import (
GPTModel,
GPTForPretraining, GPTForPretraining,
GPTModel,
GPTPretrainingCriterion, GPTPretrainingCriterion,
) )
...@@ -111,12 +112,12 @@ class TestGroupOperators(unittest.TestCase): ...@@ -111,12 +112,12 @@ class TestGroupOperators(unittest.TestCase):
sequence_len, sequence_len,
vocab_size, vocab_size,
) )
from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
RuleBasedTuner,
)
from paddle.distributed.auto_parallel.dist_context import ( from paddle.distributed.auto_parallel.dist_context import (
DistributedContext, DistributedContext,
) )
from paddle.distributed.auto_parallel.tuner.rule_based_tuner import (
RuleBasedTuner,
)
dist_context = DistributedContext() dist_context = DistributedContext()
tuner = RuleBasedTuner(dist_context) tuner = RuleBasedTuner(dist_context)
......
...@@ -12,11 +12,11 @@ ...@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import tempfile
import unittest
import os import os
import sys
import subprocess import subprocess
import sys
import tempfile
import unittest
class TestHighOrderGrad(unittest.TestCase): class TestHighOrderGrad(unittest.TestCase):
......
This diff has been collapsed.