Unverified commit 89bd4011, authored by Nyakku Shigure, committed by GitHub

[CodeStyle][E401][F811] remove redefined imports and variables (#48538)

Parent e707ee53
@@ -17,13 +17,13 @@ exclude =
 ignore =
     # E, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
     E203,
-    E401,E402,
+    E402,
     E501,
     E721,E722,E731,E741,
     # F, see https://flake8.pycqa.org/en/latest/user/error-codes.html
     F405,
-    F811,F841,
+    F841,
     # W, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
     W503
@@ -37,3 +37,9 @@ per-file-ignores =
     .cmake-format.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
     python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
+    # These files will be fixed in the future
+    python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py: F811
+    python/paddle/fluid/tests/unittests/test_activation_nn_grad.py: F811
+    python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py: F811
+    python/paddle/fluid/tests/unittests/test_matmul_v2_op.py: F811
+    python/paddle/fluid/tests/unittests/test_rrelu_op.py: F811
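For reference: E401 flags several modules imported in one statement, and F811 flags rebinding a name whose earlier binding was never used. A minimal sketch that trips both checks (the module choices are illustrative, not taken from the diff):

```python
# flake8 reports E401 here: two modules imported in one statement.
import os, sys

# flake8 reports F811 on the second line: `path` is rebound before
# the first binding is ever used.
from os import path
from posixpath import join as path

print(path, os.sep, sys.platform)
```

The per-file-ignores entries added above keep F811 enabled repo-wide while temporarily exempting the five tests that still contain known redefinitions.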
@@ -25,8 +25,6 @@ if _in_eager_mode_:
 else:
     from .py_layer import LegacyPyLayer as PyLayer  # noqa: F401
     from .py_layer import LegacyPyLayerContext as PyLayerContext  # noqa: F401
-from ..framework import set_grad_enabled, is_grad_enabled  # noqa: F401
-from ..fluid.dygraph.base import no_grad_ as no_grad  # noqa: F401
 from .saved_tensors_hooks import saved_tensors_hooks
 __all__ = [  # noqa
...
@@ -35,7 +35,6 @@ from paddle.fluid.framework import Variable
 from paddle.fluid.io import is_belong_to_optimizer, is_parameter
 OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
-OpRole = core.op_proto_and_checker_maker.OpRole
 __no_shape_var_type__ = [
     core.VarDesc.VarType.READER,
...
@@ -13,7 +13,8 @@
 # limitations under the License.
 import signal
-import os, sys
+import os
+import sys
 from .manager import ElasticManager
 from .manager import ElasticStatus
...
@@ -14,7 +14,6 @@
 import paddle
 from paddle.distributed import fleet
-from paddle.fluid.dygraph.varbase_patch_methods import _grad_scalar
 from .base.topology import ParallelMode
 from .meta_parallel import (
...
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid.core as core
 from paddle.fluid import core
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.fluid.framework import _test_eager_guard
...
@@ -31,7 +31,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_sta
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import (
     GroupShardedStage2,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.incubate.distributed.utils.io import load, save
 from paddle.nn import Linear
...
@@ -24,7 +24,6 @@ from paddle.distributed.sharding import (
     group_sharded_parallel,
     save_group_sharded_model,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -22,7 +22,6 @@ from paddle.distributed.sharding import (
     group_sharded_parallel,
     save_group_sharded_model,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -28,7 +28,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_sta
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import (
     GroupShardedStage2,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -28,7 +28,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_sta
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage2 import (
     GroupShardedStage2,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -34,7 +34,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -24,7 +24,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -23,7 +23,6 @@ from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimiz
     ShardingOptimizerStage2,
 )
 from paddle.distributed.fleet.utils.internal_storage import GradStorage
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -29,7 +29,6 @@ from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimiz
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import (
     ShardingStage2,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -35,7 +35,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import (
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import (
     ShardingScaler,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -25,7 +25,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage3 import (
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_utils import (
     ShardingScaler,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -18,7 +18,6 @@ from test_dist_base import TestParallelDyGraphRunnerBase, runtime_main
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.dygraph.nn import Linear
 from paddle.nn import Linear
 np.random.seed(2021)
...
@@ -26,7 +26,6 @@ from test_dist_base import (
 import paddle
 import paddle.distributed as dist
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Linear
 from paddle.nn import Linear
 seed = 90
...
@@ -18,7 +18,6 @@ from test_dist_base import runtime_main
 import paddle
 import paddle.fluid as fluid
-from paddle.fluid.dygraph.nn import Linear
 from paddle.nn import Linear
 seed = 90
...
@@ -20,7 +20,6 @@ from test_dist_base import TestParallelDyGraphRunnerBase, runtime_main
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.dygraph.nn import Linear
 from paddle.nn import Linear
 batch_size = 64
...
@@ -34,7 +34,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -29,7 +29,6 @@ from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimiz
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import (
     ShardingStage2,
 )
-from paddle.fluid.dygraph.nn import Linear
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 import paddle
-import paddle.fluid.core as core
 from paddle.fluid import core
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.fluid.framework import _test_eager_guard
...
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 import paddle.distributed as dist
-import paddle.fluid.core as core
 from paddle.distributed.collective import (
     Group,
     _default_group_name,
...
@@ -138,8 +138,6 @@ class TestDistWord2vec2x2(TestDistRunnerBase):
 if __name__ == "__main__":
-    import os
     os.environ['CPU_NUM'] = '1'
     os.environ['USE_CUDA'] = "FALSE"
     runtime_main(TestDistWord2vec2x2)
@@ -23,7 +23,7 @@ from predictor_utils import PredictorTools
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
-from paddle.fluid.dygraph.nn import BatchNorm, Linear
+from paddle.fluid.dygraph.nn import BatchNorm
 from paddle.fluid.initializer import MSRA
 from paddle.fluid.param_attr import ParamAttr
 from paddle.jit import ProgramTranslator
...
@@ -20,7 +20,7 @@ from test_lac import DynamicGRU
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import to_variable
-from paddle.fluid.dygraph.nn import Embedding, Linear
+from paddle.fluid.dygraph.nn import Embedding
 from paddle.jit import ProgramTranslator
 from paddle.jit.api import declarative
 from paddle.nn import Linear
...
@@ -126,13 +126,13 @@ class TestCase7(TestBase):
 @unittest.skip("Only support NCL, NCHW, NCDHW")
-class TestCase7(TestBase):
+class TestCase8(TestBase):
     def set_op_attrs(self):
         self.attrs = {"pad": [1, 2, 3, 4], "data_format": "NHWC"}
 @unittest.skip("Only support NCL, NCHW, NCDHW")
-class TestCase7(TestBase):
+class TestCase9(TestBase):
     def set_op_attrs(self):
         self.attrs = {"pad": [1, 2, 3, 4, 1, 3], "data_format": "NDHWC"}
...
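The rename above fixes more than style: when two classes in one module share a name, the later definition silently replaces the earlier one, so the first test never runs. A minimal sketch of the failure mode (test bodies are placeholders):

```python
import unittest


class TestCase7(unittest.TestCase):  # never collected: shadowed below
    def test_nhwc(self):
        self.assertTrue(True)


class TestCase7(unittest.TestCase):  # noqa: F811 -- redefinition
    def test_ndhwc(self):
        self.assertTrue(True)


if __name__ == "__main__":
    # unittest discovers only the second TestCase7; flake8's F811
    # catches the shadowing that Python itself accepts silently.
    unittest.main()
```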
@@ -18,7 +18,6 @@ from test_dist_base import TestParallelDyGraphRunnerBase, runtime_main
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.dygraph.nn import Linear
 from paddle.nn import Linear
 np.random.seed(2021)
...
@@ -847,7 +847,5 @@ class TestDygraphBatchNormOpenReserveSpace(unittest.TestCase):
 if __name__ == '__main__':
-    import paddle
     paddle.enable_static()
     unittest.main()
@@ -422,7 +422,5 @@ class TestBatchNormUseGlobalStatsCase3(TestBatchNormUseGlobalStats):
 if __name__ == '__main__':
-    import paddle
     paddle.enable_static()
     unittest.main()
@@ -14,6 +14,7 @@
 import os
 import pickle
+import socket
 import subprocess
 import sys
 import tempfile
@@ -161,10 +162,6 @@ def runtime_main(test_class, col_type):
     model.run_trainer(args)
-import socket
-from contextlib import closing
 class TestDistBase(unittest.TestCase):
     def setUp(self):
         self._port_set = set()
...
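Hoisting `import socket` to the top of the file removes the mid-file redefinition. In these distributed tests, `socket` and `contextlib.closing` are typically combined to pick a free port for the test endpoints; a hedged sketch of that common pattern (the helper name is illustrative, not from the diff):

```python
import socket
from contextlib import closing


def find_free_port() -> int:
    # Bind to port 0 so the OS picks an unused port, read it back,
    # then close the socket so the port can be reused immediately.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("127.0.0.1", 0))
        return s.getsockname()[1]


if __name__ == "__main__":
    print(find_free_port())
```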
@@ -14,6 +14,7 @@
 import os
 import pickle
+import socket
 import subprocess
 import sys
 import tempfile
@@ -140,10 +141,6 @@ def runtime_main(test_class, col_type, sub_type):
     model.run_trainer(args)
-import socket
-from contextlib import closing
 class TestDistBase(unittest.TestCase):
     def setUp(self):
         self._port_set = set()
...
@@ -150,7 +150,5 @@ class TestCropNoneShape(unittest.TestCase):
 if __name__ == '__main__':
-    import paddle
     paddle.enable_static()
     unittest.main()
@@ -271,7 +271,5 @@ class TestCropTensorException(unittest.TestCase):
 if __name__ == '__main__':
-    import paddle
     paddle.enable_static()
     unittest.main()
@@ -12,14 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import paddle
-import paddle.distributed.fleet as fleet
-import paddle.distributed.fleet.base.role_maker as role_maker
-import paddle.fluid as fluid
-from paddle.distributed.fleet.utils.ps_util import DistributedInfer
 """
 high level unit test for distribute fleet.
 """
 import argparse
@@ -34,6 +28,10 @@ import unittest
 from contextlib import closing
 import paddle
+import paddle.distributed.fleet as fleet
+import paddle.distributed.fleet.base.role_maker as role_maker
+import paddle.fluid as fluid
+from paddle.distributed.fleet.utils.ps_util import DistributedInfer
 paddle.enable_static()
...
@@ -89,7 +89,7 @@ class TestMNIST(TestParallelExecutorBase):
             return optimizer
         if only_forward:
-            _optimizer = None
+            _optimizer = None  # noqa: F811
         (
             fuse_op_first_loss,
...
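Here the rebinding is intentional (the optimizer is dropped in forward-only runs), so the fix is a line-scoped `# noqa: F811` rather than a rename. A stripped-down illustration of the same pattern, assuming an inner factory function as in the test:

```python
def run(only_forward: bool):
    def _optimizer():
        # Stand-in for the real optimizer factory used by the test.
        return "sgd"

    if only_forward:
        # pyflakes sees the function above rebound before use and
        # reports F811; the comment silences only this line.
        _optimizer = None  # noqa: F811
    return _optimizer
```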
@@ -19,7 +19,6 @@ import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-from paddle import fluid
 from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.layers import gru_unit
...
@@ -21,7 +21,7 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
-from paddle.fluid.dygraph.nn import BatchNorm, Embedding, GRUUnit, Linear
+from paddle.fluid.dygraph.nn import BatchNorm, Embedding, GRUUnit
 from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
...
@@ -53,7 +53,7 @@ class TestMathOpPatches(unittest.TestCase):
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         a_np = np.random.random(size=[10, 1]).astype('float32')
-        b_np = exe.run(
+        (b_np,) = exe.run(
             fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b]
         )
         np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05)
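`Executor.run` returns one result per entry in `fetch_list`, so the call above yields a one-element list; unpacking with `(b_np,) = ...` binds the array itself instead of the list. The same idiom in plain Python:

```python
def run(fetch_list):
    # Stand-in for Executor.run: one result per fetched target.
    return [x * 2 for x in fetch_list]


# Without unpacking, b_np would be the whole one-element list.
(b_np,) = run(fetch_list=[21])
assert b_np == 42
```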
@@ -71,7 +71,7 @@ class TestMathOpPatches(unittest.TestCase):
         np.testing.assert_allclose(a_np - 10, b_np, rtol=1e-05)
     @prog_scope()
-    def test_radd_scalar(self):
+    def test_rsub_scalar(self):
         a = fluid.layers.data(name="a", shape=[1])
         b = 10 - a
         place = fluid.CPUPlace()
...
@@ -161,7 +161,5 @@ class API_NormTest(unittest.TestCase):
 if __name__ == '__main__':
-    import paddle
     paddle.enable_static()
     unittest.main()
@@ -184,7 +184,7 @@ class TestOptimizerForVarBase(unittest.TestCase):
         z.backward()
         opt.step()
-    def func_test_create_param_lr_with_no_1_value_for_coverage(self):
+    def test_create_param_lr_with_no_1_value_for_coverage(self):
         with _test_eager_guard():
             self.func_test_create_param_lr_with_1_for_coverage()
         self.func_test_create_param_lr_with_1_for_coverage()
...
@@ -1114,7 +1114,5 @@ class TestAnyAPI(unittest.TestCase):
 if __name__ == '__main__':
-    import paddle
     paddle.enable_static()
     unittest.main()
@@ -21,7 +21,6 @@ import paddle
 import paddle.fluid.core as core
 paddle.enable_static()
-from op_test import OpTest
 class TestSearchSorted(OpTest):
...
@@ -22,7 +22,6 @@ import paddle.fluid.layers as layers
 from paddle.fluid import Program, program_guard
 from paddle.fluid.backward import append_backward
 from paddle.fluid.executor import Executor
-from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.layers.control_flow import merge_lod_tensor, split_lod_tensor
...
@@ -19,7 +19,6 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
...
@@ -1380,11 +1380,6 @@ class TestVarBase(unittest.TestCase):
             self.func_tensor_str_bf16()
         self.func_tensor_str_bf16()
-    def test_tensor_str_bf16(self):
-        with _test_eager_guard():
-            self.func_tensor_str_bf16()
-        self.func_tensor_str_bf16()
     def func_test_print_tensor_dtype(self):
         paddle.disable_static(paddle.CPUPlace())
         a = paddle.rand([1])
...
@@ -18,7 +18,6 @@ import unittest
 import numpy as np
 sys.path.append("..")
-import unittest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
...
@@ -14,6 +14,7 @@
 import os
 import pickle
+import socket
 import subprocess
 import sys
 import tempfile
@@ -165,10 +166,6 @@ def runtime_main(test_class, col_type, sub_type):
     model.run_trainer(args)
-import socket
-from contextlib import closing
 class TestDistBase(unittest.TestCase):
     def setUp(self):
         self._port_set = set()
...
@@ -26,7 +26,6 @@ from paddle.fluid import Program, program_guard
 np.random.seed(10)
-from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
     create_test_class,
...
@@ -79,7 +79,7 @@ class XPUTestRollOp(XPUOpTestWrapper):
             self.shifts = [8, -1]
             self.axis = [-1, -2]
-    class TestRollOpCase4(TestXPURollOp):
+    class TestRollOpCase5(TestXPURollOp):
         def init_shapes(self):
             self.x_shape = (100, 10, 5, 10)
             self.shifts = [20, -1]
...
@@ -19,9 +19,6 @@ import numpy as np
 sys.path.append("..")
-import unittest
-import numpy as np
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
     XPUOpTestWrapper,
...
@@ -62,7 +62,7 @@ class CallTransformer(BaseTransformer):
                 'enumerate',
                 'print',
             }
-            is_builtin = eval("is_builtin({})".format(func_str))
+            is_builtin = eval("is_builtin({})".format(func_str))  # noqa: F811
             need_convert = func_str in need_convert_builtin_func_list
             return is_builtin and not need_convert
         except Exception:
...
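This redefinition is deliberate: the string handed to `eval` calls the imported `is_builtin` helper, and the boolean result is bound to the same name. pyflakes cannot see uses inside an evaluated string, so it treats the helper as rebound before use; hence the `# noqa: F811`. A stripped-down sketch (this `is_builtin` helper is a stand-in, not Paddle's):

```python
import builtins


def is_builtin(func) -> bool:
    # Stand-in for the real helper that the evaluated string calls.
    return getattr(builtins, getattr(func, "__name__", ""), None) is func


func_str = "len"
# The evaluated string uses the helper above, but pyflakes cannot
# trace that, so the rebinding below is flagged as F811.
is_builtin = eval("is_builtin({})".format(func_str))  # noqa: F811
print(is_builtin)  # True
```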
@@ -14,7 +14,6 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
 from paddle.framework import core
 from ...fluid.data_feeder import check_dtype, check_variable_and_dtype
...
@@ -14,7 +14,6 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.layers.tensor import fill_constant
 from paddle.framework import core, in_dynamic_mode
...
@@ -17,7 +17,7 @@
 import paddle
 import paddle.fluid as fluid
 from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
-from paddle.framework import _non_static_mode, core
+from paddle.framework import core
 from paddle.utils import deprecated
 from ...fluid.data_feeder import check_variable_and_dtype
...
@@ -43,7 +43,6 @@ from .common import Dropout  # noqa: F401
 from .common import Dropout2D  # noqa: F401
 from .common import Dropout3D  # noqa: F401
 from .common import AlphaDropout  # noqa: F401
-from .common import Upsample  # noqa: F401
 from .common import UpsamplingBilinear2D  # noqa: F401
 from .common import UpsamplingNearest2D  # noqa: F401
 from .common import Fold
...
@@ -30,7 +30,6 @@ from ..fluid.data_feeder import (
     convert_dtype,
 )
 from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
-from ..fluid.framework import _in_legacy_dygraph
 from ..fluid.layers import elementwise_sub, utils
 from ..framework import (
     LayerHelper,
...
@@ -652,7 +652,5 @@ class TestTransformsTensor(TestTransformsCV2):
             transform = transforms.RandomResizedCrop(64)
             transform(1)
-    test_color_jitter = None
+    test_color_jitter = None  # noqa: F811
 class TestFunctional(unittest.TestCase):
...
@@ -117,7 +117,7 @@ def _elementwise_mul_flops(input_shapes, attrs):
 @register_flops("elementwise_div")
-def _elementwise_mul_flops(input_shapes, attrs):
+def _elementwise_div_flops(input_shapes, attrs):
     """FLOPs computation for elementwise_div op.
     For elementwise_div(input,other):
     input_shapes = [shape_of_input, shape_of_ohther]
...
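Because `register_flops` keys the registry on the op-name string, the duplicated `_elementwise_mul_flops` definition still registered the divide handler correctly; the second `def` merely shadowed the first at module level, which is exactly what F811 guards against. A toy registry showing the hazard (names and bodies are illustrative):

```python
_FLOPS_REGISTRY = {}


def register_flops(op_name):
    # Minimal sketch of a decorator-based registry.
    def wrapper(func):
        _FLOPS_REGISTRY[op_name] = func
        return func
    return wrapper


@register_flops("elementwise_mul")
def _elementwise_mul_flops(input_shapes, attrs):
    return 1


@register_flops("elementwise_div")
def _elementwise_div_flops(input_shapes, attrs):
    # Before the rename, this function was also named
    # _elementwise_mul_flops and shadowed the handler above.
    return 2


print(_FLOPS_REGISTRY["elementwise_mul"](None, None))  # -> 1
```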