Unverified · Commit 49638f25 · Authored by: Jiangxinz · Committed by: GitHub

fix undef var (#33691)

Parent commit: 56692f66
...@@ -29,6 +29,7 @@ import struct ...@@ -29,6 +29,7 @@ import struct
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from distutils.util import strtobool
logger = logging.getLogger("root") logger = logging.getLogger("root")
logger.propagate = False logger.propagate = False
...@@ -349,7 +350,7 @@ def add_arguments(argname, type, default, help, argparser, **kwargs): ...@@ -349,7 +350,7 @@ def add_arguments(argname, type, default, help, argparser, **kwargs):
add_argument("name", str, "Jonh", "User name.", parser) add_argument("name", str, "Jonh", "User name.", parser)
args = parser.parse_args() args = parser.parse_args()
""" """
type = distutils.util.strtobool if type == bool else type type = strtobool if type == bool else type
argparser.add_argument( argparser.add_argument(
"--" + argname, "--" + argname,
default=default, default=default,
...@@ -685,7 +686,7 @@ def get_device_proc_info(args): ...@@ -685,7 +686,7 @@ def get_device_proc_info(args):
gpus = get_gpus(args.gpus) gpus = get_gpus(args.gpus)
if args.nproc_per_node is not None: if args.nproc_per_node is not None:
assert (len(gpus) % int(args.nproc_per_node)) ==0, \ assert (len(gpus) % int(args.nproc_per_node)) ==0, \
"gpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(gpus), arg.nproc_per_node) "gpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(gpus), args.nproc_per_node)
n = int(len(gpus) / int(args.nproc_per_node)) n = int(len(gpus) / int(args.nproc_per_node))
devices_per_proc = [ devices_per_proc = [
...@@ -699,7 +700,7 @@ def get_device_proc_info(args): ...@@ -699,7 +700,7 @@ def get_device_proc_info(args):
xpus = get_xpus(args.xpus) xpus = get_xpus(args.xpus)
if args.nproc_per_node is not None: if args.nproc_per_node is not None:
assert (len(xpus) % int(args.nproc_per_node)) == 0, \ assert (len(xpus) % int(args.nproc_per_node)) == 0, \
"xpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(xpus), arg.nproc_per_node) "xpus' number:{} mod args.nproc_per_node:{} must == 0".format(len(xpus), args.nproc_per_node)
n = int(len(xpus) / int(args.nproc_per_node)) n = int(len(xpus) / int(args.nproc_per_node))
devices_per_proc = [ devices_per_proc = [
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
from __future__ import print_function from __future__ import print_function
import unittest
import paddle import paddle
from paddle.fluid.contrib import sparsity from paddle.fluid.contrib import sparsity
from paddle.fluid.tests.unittests.asp.asp_pruning_base import TestASPHelperPruningBase from paddle.fluid.tests.unittests.asp.asp_pruning_base import TestASPHelperPruningBase
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
from __future__ import print_function from __future__ import print_function
import unittest
import paddle import paddle
from paddle.fluid.contrib import sparsity from paddle.fluid.contrib import sparsity
from paddle.fluid.tests.unittests.asp.asp_pruning_base import TestASPHelperPruningBase from paddle.fluid.tests.unittests.asp.asp_pruning_base import TestASPHelperPruningBase
......
...@@ -20,6 +20,7 @@ import six ...@@ -20,6 +20,7 @@ import six
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer
from paddle.optimizer.lr import NoamDecay
from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase
""" """
......
...@@ -141,7 +141,7 @@ class TestXPUBatchNormOp(unittest.TestCase): ...@@ -141,7 +141,7 @@ class TestXPUBatchNormOp(unittest.TestCase):
else: else:
raise ValueError( raise ValueError(
"Unsupported data layout! Only NCHW and NHWC is supported, but received " "Unsupported data layout! Only NCHW and NHWC is supported, but received "
+ data_layout) + self.data_layout)
np.random.seed(1024) np.random.seed(1024)
self.x_np = np.random.random_sample(self.shape).astype(self.dtype) self.x_np = np.random.random_sample(self.shape).astype(self.dtype)
self.scale_np = np.random.random_sample( self.scale_np = np.random.random_sample(
......
...@@ -17,6 +17,7 @@ import json ...@@ -17,6 +17,7 @@ import json
import glob import glob
import logging import logging
import pandas as pd import pandas as pd
import multiprocessing
from multiprocessing import Process from multiprocessing import Process
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register first.