Unverified commit 9f83d3c6 authored by Nyakku Shigure, committed by GitHub

[CodeStyle][F401] remove unused imports in fluid/tests/custom_* (#46717)
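For context, F401 is the flake8/pyflakes code for "module imported but unused". A minimal sketch of what the rule flags and how removals like the ones below resolve it (hypothetical file, for illustration only):

    # example.py -- hypothetical illustration of the F401 rule
    import os   # F401: 'os' imported but unused; this commit deletes such lines
    import sys

    print(sys.argv)  # only 'sys' is actually used

    # Offending lines can be listed with flake8, e.g.:
    #   flake8 --select=F401 fluid/tests/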

Parent: fd8d92ca
@@ -15,7 +15,6 @@
 import os
 import site
 from paddle.fluid import core
-from distutils.sysconfig import get_python_lib
 from distutils.core import setup, Extension
 from setuptools.command.build_ext import build_ext
......
@@ -14,7 +14,6 @@
 import os
 import sys
-import site
 import unittest
 import numpy as np
......
@@ -12,8 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 from utils import paddle_includes, extra_compile_args, IS_MAC
 from paddle.utils.cpp_extension import CUDAExtension, setup, CppExtension
......
@@ -13,7 +13,6 @@
 # limitations under the License.
 import unittest
-import paddle
 import os
 import warnings
......
@@ -18,7 +18,7 @@ import paddle
 import numpy as np
 from paddle.utils.cpp_extension import load, get_build_directory
 from paddle.utils.cpp_extension.extension_utils import run_cmd
-from utils import paddle_includes, extra_cc_args, extra_nvcc_args, IS_WINDOWS, IS_MAC
+from utils import IS_MAC, extra_cc_args, extra_nvcc_args, paddle_includes
 from test_custom_relu_op_setup import custom_relu_dynamic, custom_relu_static
 from paddle.fluid.framework import _test_eager_guard
 # Because Windows don't use docker, the shared lib already exists in the
......
@@ -18,8 +18,6 @@ import site
 import unittest
 import paddle
 import paddle.static as static
-import tempfile
-import subprocess
 import numpy as np
 from paddle.vision.transforms import Compose, Normalize
 from paddle.utils.cpp_extension.extension_utils import run_cmd
......
@@ -18,7 +18,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-import paddle.static as static
 from paddle.utils.cpp_extension import load, get_build_directory
 from paddle.utils.cpp_extension.extension_utils import run_cmd
 from utils import paddle_includes, extra_cc_args, extra_nvcc_args
......
@@ -13,7 +13,6 @@
 # limitations under the License.
 import os
-import subprocess
 import unittest
 import numpy as np
......
@@ -14,7 +14,6 @@
 import os
 import sys
-import six
 from distutils.sysconfig import get_python_lib
 from paddle.utils.cpp_extension.extension_utils import IS_WINDOWS
......
@@ -14,7 +14,6 @@
 import os
 import sys
-import time


 def train(prefix):
......
@@ -15,12 +15,9 @@
 import unittest
 import random
 import numpy as np
-import os
-import shutil
 import paddle
 from paddle.fluid import core
-from datetime import timedelta
 import paddle.fluid.core as core
 from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.dygraph.parallel import ParallelEnv
......
@@ -14,7 +14,6 @@
 import unittest
 import os
-import sys
 import copy
 import subprocess
 import time
@@ -27,7 +26,7 @@ def start_local_trainers(cluster,
                          training_script_args,
                          eager_mode=True,
                          log_dir=None):
-    from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc
+    from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc  # noqa: F401
     current_env = copy.copy(os.environ.copy())
     #paddle broadcast ncclUniqueId use socket, and
@@ -83,7 +82,7 @@ def start_local_trainers(cluster,
 def get_cluster_from_args(selected_gpus):
-    from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc
+    from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc  # noqa: F401
     cluster_node_ips = '127.0.0.1'
     node_ip = '127.0.0.1'
@@ -107,7 +106,7 @@ def get_cluster_from_args(selected_gpus):
 class TestMultipleCustomCPU(unittest.TestCase):
     def run_mnist_2custom_cpu(self, target_file_name, eager_mode=True):
-        from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc
+        from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc  # noqa: F401
         selected_devices = [0, 1]
         cluster = None
@@ -160,7 +159,7 @@ class TestProcessGroup(TestMultipleCustomCPU):
         self.temp_dir.cleanup()
     def test_process_group_xccl(self):
-        from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc
+        from paddle.distributed.utils.launch_utils import find_free_ports, watch_local_trainers, get_cluster, TrainerProc  # noqa: F401
         self.run_mnist_2custom_cpu('process_group_xccl.py')
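Note that the launch_utils imports in the file above are kept and annotated rather than deleted: appending "# noqa: F401" tells flake8 to skip the unused-import check on that one line, the usual way to retain an import that is used indirectly or for its side effects. The pattern on its own, as a minimal sketch:

    # keep an import the linter would otherwise flag as unused,
    # e.g. one needed only for its side effects at import time
    from paddle.distributed.utils.launch_utils import get_cluster  # noqa: F401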
......
@@ -14,7 +14,6 @@
 import os
 import sys
-import site
 import unittest
 import numpy as np
 import tempfile
......
@@ -14,9 +14,7 @@
 import os
 import sys
-import site
 import unittest
-import numpy as np
 import tempfile
......
@@ -24,7 +24,6 @@ BATCH_SIZE = 1024
 def train_func_base(epoch_id, train_loader, model, cost, optimizer):
-    import paddle
     total_step = len(train_loader)
     epoch_start = time.time()
......