Unverified commit c1efbdd4, authored by Nyakku Shigure, committed via GitHub

[CodeStyle][F401] remove unused import in unittests/(!test_) (#46693)

Parent commit: 7d1819b9
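F401 is the pyflakes/flake8 code for "imported but unused". The hunks below drop such imports from the helper files under unittests/ that are not prefixed with test_. As a minimal, hypothetical illustration of what the rule flags (the exact lint command used for this PR is not recorded on this page):

    # example.py -- a hypothetical module, not part of this PR
    import os    # referenced below, so it is kept
    import sys   # never referenced: flake8 reports this as F401 ('sys' imported but unused)

    print(os.getcwd())

    # Checking only this rule on a file:
    #   flake8 --select=F401 example.py
    # An import kept on purpose (for re-export or side effects) can be annotated
    # with "# noqa: F401", as some of the hunks below do.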
......@@ -13,15 +13,13 @@
# limitations under the License.
import os
import sys
import time
import paddle.fluid as fluid
from paddle.fluid import unique_name
import paddle.fluid.core as core
import paddle
from paddle.fluid.layer_helper import LayerHelper
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_optimizers.ascend import ascend_parser, ascend_optimizer
from paddle.distributed.fleet.meta_optimizers.ascend import ascend_optimizer
from collections import namedtuple
Block = namedtuple('Block', ['program'])
......
......@@ -14,7 +14,6 @@
import os
import sys
import time
def train(prefix):
......
......@@ -13,21 +13,14 @@
# limitations under the License.
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os
import sys
from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient
import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel
from paddle.fluid.framework import program_guard
from paddle.fluid import unique_name
import numpy as np
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 4
BATCH_SIZE = 1
......
......@@ -16,7 +16,6 @@ import unittest
import random
import numpy as np
import os
import shutil
import paddle
import paddle.nn as nn
......
......@@ -14,13 +14,11 @@
import unittest
import copy
import numpy as np
import random
import paddle
import paddle.nn as nn
import paddle.fluid.core as core
from paddle.distributed.fleet import auto
import paddle.nn.functional as F
from paddle.distributed import fleet
......
......@@ -13,19 +13,14 @@
# limitations under the License.
import collections
import random
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.tensor as tensor
from paddle.distributed.fleet import auto
from paddle import fluid
from paddle.fluid import layers
from paddle.distributed import fleet
from paddle.nn.layer.transformer import _convert_param_attr_to_list
from paddle.fluid.initializer import Normal, NumpyArrayInitializer
paddle.enable_static()
......
......@@ -13,13 +13,9 @@
# limitations under the License.
import numpy as np
import unittest
import time
import itertools
import six
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest
......
......@@ -15,9 +15,7 @@
import unittest
import numpy as np
import paddle.fluid as fluid
from benchmark import BenchmarkSuite
from op_test import OpTest
# This is a demo op test case for operator benchmarking and high resolution number stability alignment.
......
......@@ -15,8 +15,6 @@
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.framework import core
SEED = 2021
......
......@@ -13,8 +13,6 @@
# limitations under the License.
import os
import sys
import time
import numpy as np
os.environ[str("FLAGS_check_nan_inf")] = str("1")
......@@ -23,7 +21,6 @@ os.environ[str("GLOG_vmodule")] = str("nan_inf_utils_detail=10")
import paddle.fluid.core as core
import paddle
import paddle.fluid as fluid
import paddle.compat as cpt
paddle.enable_static()
......
......@@ -13,8 +13,6 @@
# limitations under the License.
import os
import sys
import time
import numpy as np
os.environ[str("FLAGS_check_nan_inf")] = str("1")
......
......@@ -15,9 +15,7 @@
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.distributed as dist
import test_communication_api_base as test_base
import test_collective_api_base as test_collective_base
......
......@@ -13,8 +13,6 @@
# limitations under the License.
import unittest
import paddle
import itertools
import test_communication_api_base as test_base
......
......@@ -12,24 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import os
import sys
import signal
import time
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_base import TestCollectiveRunnerBase, runtime_main
paddle.enable_static()
......
......@@ -12,24 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import os
import sys
import signal
import time
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_base import TestCollectiveRunnerBase, runtime_main
paddle.enable_static()
......
......@@ -12,25 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import os
import sys
import signal
import time
import socket
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_base import TestCollectiveRunnerBase, runtime_main
paddle.enable_static()
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import sys
import paddle.fluid as fluid
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import sys
import paddle.fluid as fluid
......
......@@ -12,19 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
......
......@@ -26,8 +26,6 @@ import numpy as np
import ctr_dataset_reader
from test_dist_fleet_base import runtime_main, FleetDistRunnerBase
from paddle.distributed.fleet.utils.ps_util import DistributedInfer
import paddle.distributed.fleet as fleet
paddle.enable_static()
......
......@@ -25,7 +25,7 @@ import os
import numpy as np
import ctr_dataset_reader
from test_dist_fleet_base import runtime_main, FleetDistRunnerBase
from test_dist_fleet_base import runtime_main
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
# Fix seed for test
......
......@@ -12,16 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import numpy as np
import logging
import paddle
import paddle.fluid as fluid
#import paddle.fluid.incubate.fleet.base.role_maker as role_maker
import paddle.distributed.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger("fluid")
......
......@@ -15,18 +15,14 @@
Distribute CTR model for test fleet api
"""
import shutil
import tempfile
import time
import paddle
import paddle.fluid as fluid
import os
import numpy as np
import ctr_dataset_reader
from test_dist_fleet_heter_base import runtime_main, FleetDistHeterRunnerBase
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
paddle.enable_static()
......
......@@ -13,12 +13,9 @@
# limitations under the License.
from test_dist_base import TestDistRunnerBase, runtime_main
import unittest
import paddle
import os
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import numpy as np
from functools import reduce
import paddle.fluid as fluid
......
......@@ -13,12 +13,9 @@
# limitations under the License.
from test_dist_base import TestDistRunnerBase, runtime_main
import unittest
import paddle
import os
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import numpy as np
from functools import reduce
import paddle.fluid as fluid
......
......@@ -13,22 +13,11 @@
# limitations under the License.
import numpy as np
import argparse
import time
import math
import random
import shutil
import tempfile
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_fleet_base import runtime_main, FleetDistRunnerBase
paddle.enable_static()
......
......@@ -16,9 +16,7 @@ Distribute CTR model for test fleet api
"""
import os
import time
import random
import numpy as np
import paddle
......
......@@ -12,22 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.collective import fleet
paddle.enable_static()
......
......@@ -12,20 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
from dist_mnist import cnn_model
......
......@@ -12,20 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
from dist_mnist import cnn_model
......
......@@ -14,18 +14,9 @@
import os
import sys
import signal
import subprocess
import argparse
import time
import math
import random
from multiprocessing import Process
from functools import reduce
import numpy as np
import pickle
import unittest
import six
import paddle
......@@ -33,7 +24,7 @@ import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import io
from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
from test_dist_base import RUN_STEP, runtime_main
from dist_simnet_bow import TestDistSimnetBow2x2, DATA_URL, DATA_MD5
......
......@@ -12,20 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import sys
import signal
from test_dist_base import TestDistRunnerBase, runtime_main
paddle.enable_static()
......
......@@ -14,10 +14,7 @@
import paddle
import paddle.fluid as fluid
from test_dist_base import TestDistRunnerBase
from dist_mnist import cnn_model
# from paddle.fluid.incubate.fleet.collective import fleet
import paddle.distributed.fleet as fleet
from dist_mnist import cnn_model # noqa: F401
import paddle.distributed.fleet.base.role_maker as role_maker
import paddle.distributed.fleet.meta_optimizers.sharding as sharding
......
......@@ -12,24 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
import six
import tarfile
import string
import re
from functools import reduce
from test_dist_base import TestDistRunnerBase, runtime_main
DTYPE = "float32"
......
......@@ -13,15 +13,9 @@
# limitations under the License.
import numpy as np
import argparse
import time
import math
import os
import sys
import six
import argparse
import ast
import multiprocessing
import time
from functools import partial
from os.path import expanduser
......@@ -29,16 +23,12 @@ import glob
import random
import tarfile
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from test_dist_base import TestDistRunnerBase, runtime_main, RUN_STEP
import paddle.compat as cpt
from paddle.compat import long_type
import hashlib
const_para_attr = fluid.ParamAttr(initializer=fluid.initializer.Constant(0.001))
const_bias_attr = const_para_attr
......
......@@ -12,18 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import signal
from test_dist_base import TestDistRunnerBase, runtime_main
IS_SPARSE = True
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import paddle
import paddle.fluid.core as core
......@@ -21,11 +20,9 @@ from paddle.incubate import DistributedFusedLamb
from paddle.vision.models import resnet18 as resnet
from paddle.distributed.fleet.meta_optimizers.common import CollectiveHelper
from paddle.fluid.clip import ClipGradBase
import paddle.nn as nn
import numpy as np
import os
import unittest
from paddle.distributed.fleet.meta_optimizers.common import is_optimizer_op, is_backward_op
from paddle.fluid.clip import _clip_by_global_norm_using_mp_type
import distutils
......
......@@ -15,17 +15,9 @@
import unittest
import random
import numpy as np
import os
import shutil
import paddle
import paddle.nn as nn
from paddle.fluid import core
import datetime
from datetime import timedelta
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.dygraph.parallel import ParallelEnv
class TestDygraphFleetAPI(unittest.TestCase):
......
......@@ -16,14 +16,10 @@ import unittest
import numpy as np
import paddle
from paddle.autograd import PyLayer
from paddle.distributed.fleet.utils import recompute
from paddle.incubate.distributed.fleet import recompute_hybrid
import random
from paddle.distributed import fleet
import paddle.fluid.layers as layers
def get_fc_block(block_idx, input_size, is_last=False):
block_name = "block_" + str(block_idx)
......
......@@ -14,7 +14,6 @@
import os
import sys
import time
def train():
......
......@@ -15,7 +15,6 @@
import paddle
import paddle.fluid as fluid
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
fluid.disable_dygraph()
......
......@@ -13,16 +13,13 @@
# limitations under the License.
"""This is the lib for gradient checker unittest."""
import unittest
import six
import collections
import numpy as np
from itertools import product
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
from paddle.fluid.backward import _append_grad_suffix_, _as_list
from paddle.fluid.framework import _test_eager_guard
try:
......
......@@ -34,7 +34,6 @@ import sys
import json
import socket
from argparse import ArgumentParser
from typing import Dict, Any
def parse_args():
......
......@@ -13,13 +13,9 @@
# limitations under the License.
import unittest
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os
import sys
from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
from paddle.distributed.fleet.utils.fs import FSFileExistsError, FSFileNotExistsError, HDFSClient, LocalFS
java_home = os.environ["JAVA_HOME"]
......
......@@ -14,8 +14,6 @@
import unittest
import numpy as np
import os
import paddle
from paddle.distributed import fleet
from paddle.fluid.dygraph.container import Sequential
import paddle.nn as nn
......
......@@ -14,11 +14,9 @@
import os
import sys
import six
import unittest
import time
import math
import multiprocessing
import numpy as np
import paddle
......
......@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pool, Process
from multiprocessing import Process
import os
import socket
from contextlib import closing
......
......@@ -12,12 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import paddle
import re
import collections
import time
import paddle.distributed.fleet as fleet
......
......@@ -14,7 +14,6 @@
import os
import sys
import time
import paddle.fluid as fluid
......
......@@ -20,9 +20,6 @@ import numpy as np
import random
import six
import struct
import time
import itertools
import collections
from collections import defaultdict
from copy import copy
......@@ -35,7 +32,7 @@ from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable, _current_expected_place
from paddle.fluid.framework import OpProtoHolder, Program, _current_expected_place
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.utils import parse_arg_and_kwargs
......@@ -1423,7 +1420,6 @@ class OpTest(unittest.TestCase):
judge whether convert current output and expect to uint16.
return True | False
"""
pass
def _is_skip_name(self, name):
if name not in self.expects:
......
......@@ -12,30 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
import numpy as np
import random
import six
import struct
import time
import itertools
import collections
from collections import defaultdict
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable, convert_np_dtype_to_dtype_
from testsuite import create_op, set_input, append_input_output, append_loss_ops
from paddle.fluid import unique_name
from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
from paddle.fluid.framework import Program, convert_np_dtype_to_dtype_
from testsuite import append_loss_ops, create_op, set_input
from white_list import op_threshold_white_list, no_grad_set_white_list
from op_test import OpTest, _set_use_system_allocator, get_numeric_gradient
from op_test import OpTest
from xpu.get_test_cover_info import is_empty_grad_op_type, get_xpu_op_support_types, type_dict_str_to_numpy
......
......@@ -17,9 +17,7 @@ import unittest
import paddle
import numpy as np
import paddle.distributed as dist
from paddle.fluid.dygraph.nn import Linear
from paddle.autograd import PyLayer
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from paddle.distributed.fleet.utils.hybrid_parallel_util import fused_allreduce_gradients
batch = 5
......
......@@ -13,7 +13,6 @@
# limitations under the License.
import unittest
import os
import paddle
import numpy as np
......
......@@ -13,8 +13,6 @@
# limitations under the License.
import unittest
import os
import copy
import paddle
import numpy as np
......@@ -22,8 +20,6 @@ import paddle.distributed as dist
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Linear
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.dygraph.parallel import ParallelEnv
import paddle.fluid.core as core
paddle.seed(1024)
np.random.seed(2021)
......
......@@ -12,18 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import contextlib
import unittest
import numpy as np
import six
import pickle
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.base import to_variable
......
......@@ -12,16 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.dygraph as dygraph
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.nn import Linear
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
......
......@@ -12,12 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.nn import Linear
from paddle.fluid.dygraph.base import to_variable
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
......
......@@ -16,7 +16,7 @@ import paddle
import paddle.fluid as fluid
from parallel_dygraph_sparse_embedding import SimpleNet, fake_sample_reader, TestSparseEmbedding
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
from test_dist_base import runtime_main
# global configs
# using small `vocab_size` to test rows number over height
......
......@@ -14,9 +14,7 @@
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import paddle.distributed.fleet as fleet
class DNNLayer(nn.Layer):
......
......@@ -17,11 +17,9 @@ import paddle.fluid as fluid
fluid.core._set_eager_deletion_mode(-1, -1, False)
import paddle
import paddle.fluid.layers.ops as ops
from paddle.fluid.layers.learning_rate_scheduler import cosine_decay
from simple_nets import init_data
from seresnext_test_base import DeviceType
import math
import os
os.environ['CPU_NUM'] = str(4)
......
......@@ -12,13 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import tarfile
import random
import paddle
import paddle.distributed.fleet as fleet
logging.basicConfig()
......
......@@ -20,12 +20,6 @@ from test_dist_base import TestDistRunnerBase, runtime_main
from paddle.incubate.nn import FusedMultiTransformer
import paddle.distributed.fleet as fleet
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.fluid.dygraph.layers import Layer
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid import core
from paddle.nn.initializer import Constant
paddle.enable_static()
......
......@@ -15,7 +15,6 @@
from functools import partial
import numpy as np
import os
import paddle.fluid as fluid
import paddle.fluid.layers as layers
......
......@@ -14,12 +14,7 @@
from paddle.fluid.framework import _dygraph_guard
import paddle.fluid as fluid
from paddle.fluid.framework import Variable
import paddle.fluid.dygraph.jit as jit
from paddle.fluid.dygraph.jit import extract_vars
import numpy as np
import os
import time
__all__ = ['DyGraphProgramDescTracerTestHelper', 'is_equal_program']
......