Unverified · Commit 632ea575 authored by Nyakku Shigure, committed by GitHub

[CodeStyle][F401] remove unused imports in unittests/auto_parallel,distribution (#46915)

Parent 0784f961
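Note: every hunk below only deletes imports flagged by the F401 check ("module imported but unused"), or trims unused names out of an existing import list. As a rough illustration, here is a minimal sketch of how such violations can be listed before removing them by hand; it assumes flake8 is installed, and the target directories are inferred from the commit title rather than taken from the diff itself.

```python
# Minimal sketch (not part of this commit): list F401 "imported but unused"
# reports with flake8 so the offending import lines can be removed by hand.
import subprocess

# Assumed paths, based on "unittests/auto_parallel,distribution" in the title.
targets = [
    "python/paddle/fluid/tests/unittests/auto_parallel",
    "python/paddle/fluid/tests/unittests/distribution",
]

# flake8 prints one report per unused import, e.g.
#   test_foo.py:21:1: F401 'sys' imported but unused
result = subprocess.run(
    ["flake8", "--select=F401", *targets],
    capture_output=True,
    text=True,
    check=False,  # flake8 exits non-zero when violations are found
)
print(result.stdout)
```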
@@ -13,14 +13,13 @@
 # limitations under the License.
 import unittest
-import sys
 import random
 import numpy as np
 import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import generate_model, create_data_holder, FakeDataset
+from get_gpt_model import FakeDataset, generate_model
 def apply_pass(use_amp=False, level=None):
......
@@ -12,21 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
-import time
-import paddle.fluid as fluid
-import copy
-import os
 import numpy as np
-import subprocess
 import paddle
 import paddle.nn as nn
-import paddle.fluid as fluid
 import paddle.static as static
 import paddle.nn.functional as F
 import paddle.utils as utils
-from paddle.fluid import layers
-from paddle.io import IterableDataset, DataLoader
 from paddle.distributed import fleet
 from paddle.distributed.fleet import auto
......
@@ -19,10 +19,6 @@ import sys
 import numpy as np
-from paddle.distributed.fleet import auto
-from auto_parallel_relaunch_model import mlp_pretrain_forward
-from auto_parallel_relaunch_model import batch_generator_creator
 sys.path.append("..")
 import auto_parallel_gpt_model as modeling
 from auto_parallel_gpt_model import GPTModel, GPTForPretraining, GPTPretrainingCriterion
......
@@ -13,14 +13,13 @@
 # limitations under the License.
 import unittest
-import sys
 import random
 import numpy as np
 import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import generate_model, create_data_holder, FakeDataset
+from get_gpt_model import FakeDataset, generate_model
 paddle.enable_static()
......
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
 import numpy as np
 import paddle
......
@@ -12,26 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
-import time
 import tempfile
-import copy
 import os
 import numpy as np
-import subprocess
 import paddle
 import paddle.nn as nn
-import paddle.fluid as fluid
-import paddle.static as static
 import paddle.nn.functional as F
-import paddle.utils as utils
-from paddle.fluid import layers
-from paddle.io import Dataset, IterableDataset, DataLoader
+from paddle.io import Dataset
 from paddle.distributed.fleet import auto
-from paddle.distributed.auto_parallel.interface import get_collection, CollectionNames
-from paddle.optimizer.lr import CosineAnnealingDecay
-from paddle.fluid.dataloader.collate import default_collate_fn
 paddle.enable_static()
 global_process_mesh = auto.ProcessMesh(mesh=[0, 1])
......
@@ -12,21 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
-import time
 import tempfile
-import copy
 import os
 import numpy as np
-import subprocess
 import paddle
 import paddle.nn as nn
-import paddle.fluid as fluid
-import paddle.static as static
 import paddle.nn.functional as F
-import paddle.utils as utils
-from paddle.fluid import layers
-from paddle.io import Dataset, DataLoader
+from paddle.io import Dataset
 from paddle.distributed.fleet import auto
......
@@ -13,14 +13,13 @@
 # limitations under the License.
 import unittest
-import sys
 import random
 import numpy as np
 import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import generate_model, create_data_holder, FakeDataset
+from get_gpt_model import FakeDataset, generate_model
 paddle.enable_static()
......
@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import random
 import paddle
-import unittest
 import numpy as np
 from paddle.distributed.fleet import auto
 from paddle.incubate.autograd import Hessian
......
@@ -12,26 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
-import time
 import tempfile
-import copy
 import os
 import numpy as np
-import subprocess
 import paddle
 import paddle.nn as nn
-import paddle.fluid as fluid
-import paddle.static as static
 import paddle.nn.functional as F
-import paddle.utils as utils
-from paddle.fluid import layers
-from paddle.io import Dataset, IterableDataset, DataLoader
-from paddle.static import InputSpec
 from paddle.distributed.fleet import auto
-from paddle.optimizer.lr import CosineAnnealingDecay
-from paddle.fluid.dataloader.collate import default_collate_fn
 paddle.enable_static()
 global_process_mesh = auto.ProcessMesh(mesh=[0, 1])
......
@@ -12,21 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import unittest
-import time
-import tempfile
-import copy
-import os
-import numpy as np
-import subprocess
 import paddle
 import paddle.nn as nn
-import paddle.fluid as fluid
-import paddle.static as static
 import paddle.nn.functional as F
-import paddle.utils as utils
-from paddle.fluid import layers
-from paddle.io import Dataset, IterableDataset, DataLoader
 from paddle.distributed.fleet import auto
 from engine_api_dp import MyDataset
......
@@ -13,14 +13,13 @@
 # limitations under the License.
 import unittest
-import sys
 import random
 import numpy as np
 import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import generate_model, create_data_holder, FakeDataset
+from get_gpt_model import FakeDataset, generate_model
 def apply_pass(use_recompute=False):
......
@@ -13,14 +13,13 @@
 # limitations under the License.
 import unittest
-import sys
 import random
 import numpy as np
 import paddle
 from paddle.distributed.fleet import auto
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from get_gpt_model import generate_model, create_data_holder, FakeDataset
+from get_gpt_model import FakeDataset, generate_model
 paddle.enable_static()
......
@@ -17,9 +17,7 @@ import unittest
 import os
 import sys
 import json
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 cluster_json = """
 {
......
@@ -27,11 +27,7 @@ from paddle.distributed.auto_parallel.completion import Completer
 from paddle.distributed.auto_parallel.dist_context import DistributedContext
 from paddle.distributed import fleet
 from paddle.distributed.auto_parallel.parallelizer import AutoParallelizer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.reshard import Resharder
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 from paddle.distributed.auto_parallel.cluster import Cluster
-from paddle.distributed.auto_parallel.cost import CommContext
 from paddle.distributed.auto_parallel.cost.base_cost import build_comp_desc_from_dist_op
 from paddle.distributed.auto_parallel.cost.base_cost import build_comm_desc_from_dist_op
 from paddle.distributed.auto_parallel.cost.base_cost import build_comm_costs_from_descs
......
@@ -17,7 +17,6 @@ import unittest
 import os
 import json
-import paddle
 from paddle.distributed.auto_parallel.cluster import Cluster
 from paddle.distributed.auto_parallel.cluster import get_default_cluster
......
@@ -16,7 +16,6 @@ import unittest
 import os
 import json
-import paddle
 from paddle.distributed.auto_parallel.cluster import Cluster
 from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignOpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import AssignValueOpCost
@@ -51,7 +50,6 @@ from paddle.distributed.auto_parallel.cost.comp_op_cost import LogOpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import LookupTableV2OpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import LookupTableV2GradOpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulOpCost
-from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulGradOpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2OpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import MatmulV2GradOpCost
 from paddle.distributed.auto_parallel.cost.comp_op_cost import MemcpyOpCost
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 from paddle.distributed.auto_parallel.converter import Converter
......
@@ -14,8 +14,6 @@
 import unittest
 import paddle
-import numpy as np
-import paddle.nn as nn
 import paddle.static as static
 from paddle.fluid.core import TensorDistAttr
 from paddle.fluid.core import OperatorDistAttr
......
@@ -13,21 +13,17 @@
 # limitations under the License.
 import unittest
-import os
-import json
 import copy
 import paddle
 import numpy as np
 import paddle.nn as nn
-import paddle.utils as utils
 import paddle.static as static
 import paddle.nn.functional as F
 from paddle.distributed import fleet
 from paddle.distributed.fleet import auto
 from paddle.distributed.auto_parallel.dist_context import DistributedContext
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -16,9 +16,6 @@ import unittest
 import paddle
 from paddle.distributed.fleet import auto
-from paddle.fluid import program_guard
-from paddle.fluid.backward import append_backward
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 from test_dist_pnorm import parallelizer
 paddle.enable_static()
......
@@ -18,7 +18,6 @@ from paddle.distributed.fleet import auto
 from paddle.fluid import program_guard
 from paddle.fluid.backward import append_backward
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -29,7 +29,6 @@ paddle.enable_static()
 def parallelizer(program_func, rank):
     from paddle.distributed.auto_parallel.completion import Completer
-    from paddle.distributed.auto_parallel.partitioner import Partitioner
     from paddle.distributed.auto_parallel.dist_context import DistributedContext
     main_program, startup_program, loss = program_func()
......
@@ -18,7 +18,6 @@ from paddle.distributed.fleet import auto
 from paddle.fluid import program_guard
 from paddle.fluid.backward import append_backward
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -16,10 +16,6 @@ import unittest
 import paddle
 from paddle.distributed.fleet import auto
-from paddle.fluid import program_guard
-from paddle.fluid.backward import append_backward
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -15,7 +15,6 @@
 import unittest
 import paddle
 from paddle.distributed.fleet import auto
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -16,9 +16,6 @@ import unittest
 import paddle
 from paddle.distributed.fleet import auto
-from paddle.fluid import program_guard
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestEngineAPI(unittest.TestCase):
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestEngineAPI(unittest.TestCase):
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestHighOrderGrad(unittest.TestCase):
......
@@ -14,15 +14,12 @@
 import unittest
 import paddle
-import paddle.fluid as fluid
 import paddle.nn as nn
 import paddle.nn.functional as F
 import paddle.static as static
-import paddle.distributed as dist
 from paddle.distributed.fleet import auto
 from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
 from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestEngineAPI(unittest.TestCase):
......
@@ -14,18 +14,8 @@
 import unittest
-import os
-import numpy as np
 import paddle
-import paddle.nn as nn
-import paddle.nn.functional as F
 from paddle.distributed.fleet import auto
-import paddle.distributed.fleet as fleet
-from paddle.io import Dataset
-from paddle.static import InputSpec
-from paddle.fluid.framework import _non_static_mode
 from test_to_static import MLPLayer, MyDataset
......
@@ -25,7 +25,7 @@ from paddle.distributed.auto_parallel.cost.base_cost import build_comp_desc_str_
 from paddle.distributed.auto_parallel.cost.base_cost import calc_time_by_modeling
 from paddle.distributed.auto_parallel.cluster import Cluster
 from paddle.distributed.auto_parallel.cost import CommContext
-from test_cluster import cluster_json, multi_cluster_json
+from test_cluster import cluster_json
 paddle.enable_static()
......
@@ -18,7 +18,6 @@ import os
 import sys
 import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestOptimizationTunerAPI(unittest.TestCase):
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestAMPPass(unittest.TestCase):
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestGradientClip(unittest.TestCase):
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestGradientMergePass(unittest.TestCase):
......
@@ -13,13 +13,10 @@
 # limitations under the License.
 import unittest
-import sys
-import random
-import numpy as np
 import paddle
 from paddle.distributed.fleet import auto
-from get_gpt_model import generate_model, create_data_holder, FakeDataset
+from get_gpt_model import FakeDataset, generate_model
 paddle.enable_static()
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestRecomputePass(unittest.TestCase):
......
@@ -16,9 +16,7 @@ import tempfile
 import unittest
 import os
 import sys
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestShardingPass(unittest.TestCase):
......
@@ -16,15 +16,13 @@ import unittest
 import paddle
 from paddle.distributed.fleet import auto
-from paddle.fluid import program_guard
-from paddle.incubate.autograd import prim2orig, enable_prim, prim_enabled
+from paddle.incubate.autograd import enable_prim
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 from paddle.distributed.fleet import auto
 from paddle.distributed.auto_parallel.completion import Completer
 from paddle.distributed.auto_parallel.partitioner import Partitioner
 from paddle.distributed.auto_parallel.utils import set_var_dist_attr
-from paddle.distributed.auto_parallel.dist_context import DistributedContext, get_default_distributed_context, set_default_distributed_context
+from paddle.distributed.auto_parallel.dist_context import DistributedContext, get_default_distributed_context
 paddle.enable_static()
 enable_prim()
......
@@ -15,14 +15,11 @@
 import unittest
 import numpy as np
 import paddle
-import paddle.fluid as fluid
 import paddle.nn as nn
 import paddle.nn.functional as F
 import paddle.static as static
-from paddle.distributed.fleet import auto
 from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
 from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
 paddle.enable_static()
......
@@ -17,9 +17,7 @@ import unittest
 import os
 import sys
 import json
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestPlannerReLaunch(unittest.TestCase):
......
@@ -17,9 +17,7 @@ import unittest
 import os
 import sys
 import json
-import shutil
 import subprocess
-from paddle.distributed.fleet.launch_utils import run_with_coverage
 class TestPlannerReLaunch(unittest.TestCase):
......
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 # import yaml
 import unittest
 from paddle.distributed.fleet import auto
......
@@ -14,14 +14,12 @@
 import unittest
-import os
 import numpy as np
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle.distributed.fleet import auto
-import paddle.distributed.fleet as fleet
 from paddle import LazyGuard
 from paddle.io import Dataset
......
@@ -16,19 +16,13 @@ import unittest
 import paddle
 import numpy as np
 import paddle.nn as nn
-import paddle.utils as utils
 import paddle.static as static
 import paddle.nn.functional as F
 from paddle.distributed.fleet import auto
 from paddle.distributed import fleet
 from paddle.distributed.auto_parallel.completion import Completer
-from paddle.distributed.auto_parallel.partitioner import Partitioner
-from paddle.distributed.auto_parallel.utils import make_data_unshard
-from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute, TensorDistributedAttribute
-from paddle.distributed.auto_parallel.dist_context import DistributedContext, get_default_distributed_context
-from paddle.distributed.auto_parallel.operators import find_compatible_distributed_operator_impls
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
+from paddle.distributed.auto_parallel.dist_context import DistributedContext
 paddle.enable_static()
......
@@ -16,7 +16,6 @@ import unittest
 import paddle
 import numpy as np
 import paddle.nn as nn
-import paddle.utils as utils
 import paddle.fluid as fluid
 import paddle.static as static
 import paddle.nn.functional as F
@@ -26,10 +25,7 @@ from paddle.distributed import fleet
 from paddle.distributed.auto_parallel.completion import Completer
 from paddle.distributed.auto_parallel.partitioner import Partitioner
 from paddle.distributed.auto_parallel.utils import make_data_unshard
-from paddle.distributed.auto_parallel.dist_attribute import OperatorDistributedAttribute, TensorDistributedAttribute
-from paddle.distributed.auto_parallel.dist_context import DistributedContext, get_default_distributed_context
-from paddle.distributed.auto_parallel.operators import find_compatible_distributed_operator_impls
-from paddle.distributed.auto_parallel.utils import print_program_with_dist_attr
+from paddle.distributed.auto_parallel.dist_context import get_default_distributed_context
 paddle.enable_static()
......
@@ -12,21 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import re
 import sys
-import unittest
 import numpy as np
 import paddle
-import paddle.fluid.core as core
-import paddle.fluid.dygraph as dg
-import paddle.static as static
 import scipy.stats
-from numpy.random import random as rand
 sys.path.append("../")
 from op_test import OpTest
-from paddle.fluid import Program, program_guard
 paddle.enable_static()
......
@@ -12,12 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import math
 import unittest
 import numpy as np
 import paddle
-from paddle import fluid
 from paddle.distribution import *
 from paddle.fluid import layers
......
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 import scipy.stats
-import config
 from config import ATOL, DEVICES, RTOL
 from parameterize import TEST_CASE_NAME, parameterize_cls, place, xrand
......
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
......
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import math
 import unittest
 import numpy as np
......
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 from paddle.distribution import constraint
-import config
 import parameterize as param
 np.random.seed(2022)
......
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 import scipy.stats
-import config
 from config import ATOL, DEVICES, RTOL
 import parameterize as param
......
@@ -19,7 +19,7 @@ import paddle
 import scipy.stats
 from config import ATOL, DEVICES, RTOL
-from parameterize import TEST_CASE_NAME, parameterize_cls, place, xrand
+from parameterize import TEST_CASE_NAME, parameterize_cls, place
 np.random.seed(2022)
 paddle.enable_static()
......
@@ -16,7 +16,6 @@ import unittest
 import numpy as np
 import paddle
-import scipy.stats
 import config
 import mock_data as mock
......
@@ -16,7 +16,6 @@ import unittest
 import numpy as np
 import paddle
-import scipy.stats
 import config
 import mock_data as mock
......
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
 import paddle
-import scipy.stats
 import config
 import parameterize as param
......
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
 import paddle
-import scipy.stats
 import config
 import parameterize as param
......
@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 import paddle
-from paddle.distribution import constraint, transform, variable
+from paddle.distribution import transform, variable
 import config
 import parameterize as param
......
@@ -16,7 +16,7 @@ import unittest
 import numpy as np
 import paddle
-from paddle.distribution import transform, variable, constraint
+from paddle.distribution import transform, variable
 import config
 import parameterize as param
......
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
 import paddle
-import scipy.stats
 import config
 import parameterize as param
......
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
 import paddle
-import scipy.stats
 import config
 import parameterize as param
......
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import math
 import unittest
 import numpy as np
......
@@ -14,12 +14,10 @@
 import unittest
-import numpy as np
 import paddle
 from paddle.distribution import variable
 from paddle.distribution import constraint
-import config
 import parameterize as param
 paddle.seed(2022)
......
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
......
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import numbers
 import unittest
 import numpy as np
......