Unverified commit 8f65f72e, authored by Nyakku Shigure, committed by GitHub

bump ruff to 0.0.272 and update config (#54449)

* bump ruff to 0.0.271 and update config

* exclude third_party

* bump ruff to 0.0.272

* refine config
Parent 4a77cf53
 [flake8]
-select = C,E,F,W
+select = C,E,W
 exclude =
     ./build,
-    # A trick to exclude fluid/ but keep fluid/tests/, see more at
-    # https://github.com/PaddlePaddle/Paddle/pull/46290#discussion_r976392010
-    ./python/paddle/fluid/[!t]**,
-    ./python/paddle/fluid/tra**,
+    # Exclude fluid directory
+    ./python/paddle/fluid/**,
     # Exclude third-party libraries
+    ./third_party/**,
     ./python/paddle/utils/gast/**,
+    # Temporarily ignore CINN files, it will fix later
+    ./python/cinn/**,
+    ./test/cinn/**,
 ignore =
     # Whitespace before ‘,’, ‘;’, or ‘:’, it is not compatible with black
     E203,
@@ -23,20 +25,8 @@ ignore =
     E731,
     # Do not use variables named ‘l’, ‘O’, or ‘I’
     E741,
-    # `name` may be undefined, or defined from star imports: `module`
-    F405,
-    # Local variable name is assigned to but never used
-    F841,
     # Line break before binary operator, it is not compatible with black
     W503
 per-file-ignores =
     # These files need tabs for testing.
     test/dygraph_to_static/test_error.py:E101,W191
-    python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191
-    # Ignore unused imports in __init__.py
-    __init__.py: F401
-    # Ignore undefined variables in CMake config and some dygraph_to_static tests
-    .cmake-format.py: F821
-    test/dygraph_to_static/test_loop.py: F821
-    test/dygraph_to_static/test_closure_analysis.py: F821
-    python/paddle/static/amp/decorator.py: F811
@@ -4,7 +4,8 @@ exclude: |
         patches/.+|
         paddle/fluid/framework/fleet/heter_ps/cudf/.+|
         paddle/fluid/distributed/ps/thirdparty/round_robin.h|
-        python/paddle/utils/gast/.+
+        python/paddle/utils/gast/.+|
+        third_party/.+
     )$
 repos:
 # Common hooks
@@ -32,12 +33,11 @@ repos:
         name: Tabs remover (Python)
         files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
         args: [--whitespaces-count, '4']
-        # Exclude the fluid directory but keep the fluid/tests directory.
+        # Exclude the fluid directory.
         # And exclude some unit test files that require tabs.
         exclude: |
             (?x)^(
-                python/paddle/fluid/(?!tests).+|
-                python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py|
+                python/paddle/fluid/.+|
                 test/dygraph_to_static/test_error.py
             )$
 -   repo: local
@@ -66,8 +66,8 @@ repos:
     hooks:
     -   id: flake8
         args: ["--config=.flake8"]
--   repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: v0.0.254
+-   repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.0.272
     hooks:
     -   id: ruff
         args: [--fix, --exit-non-zero-on-fix, --no-cache]
......
@@ -493,10 +493,9 @@ class OperantsAPI(ForwardAPI):
         )
         first_input_type = " ".join(declare_args[0].split(" ")[:-1])
         # NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
-        assert first_input_type == "const Tensor&", (
-            "Error! The first argument of Tensor Api %s must be Tensor, but received %s"
-            % (func_name, first_input_type)
-        )
+        assert (
+            first_input_type == "const Tensor&"
+        ), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
         for name in self.attrs['names']:
             default_value = ''
             if self.attrs['attr_info'][name][1] is not None:
@@ -515,10 +514,9 @@ class OperantsAPI(ForwardAPI):
         )
         first_input_type = " ".join(define_args[0].split(" ")[:-1])
         # NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
-        assert first_input_type == "const Tensor&", (
-            "Error! The first argument of Tensor Api %s must be Tensor, but received %s"
-            % (func_name, first_input_type)
-        )
+        assert (
+            first_input_type == "const Tensor&"
+        ), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
         for name in self.attrs['names']:
             define_args.append(self.attrs['attr_info'][name][0] + ' ' + name)
         # remove first Tensor argument
......
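Note: the two hunks above, and many of the hunks that follow, apply the same mechanical rewrite: `%`-interpolated messages become f-strings (or `str.format` calls when the message spans several literals), which black then reflows more compactly. A minimal, self-contained sketch of the pattern — `func_name` and `first_input_type` are placeholder values, not taken from the generator's real state:

func_name = "matmul"
first_input_type = "const Scalar&"

# Old style: %-interpolation with a trailing argument tuple.
old_msg = (
    "Error! The first argument of Tensor Api %s must be Tensor, but received %s"
    % (func_name, first_input_type)
)

# New style: an f-string; same rendered text, fewer lines once black wraps the assert.
new_msg = f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"

assert old_msg == new_msg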
 [tool.black]
-exclude = "build"
 line-length = 80
 skip-string-normalization = true
+extend-exclude = '''
+(
+    third_party/.+      # Exclude third_party directory
+    | build/.+          # Exclude build directory
+)
+'''

 [tool.isort]
 profile = "black"
 line_length = 80
 known_first_party = ["paddle"]
-skip = ["build", "__init__.py"]
+skip = ["build", "third_party", "__init__.py"]
 extend_skip_glob = [
     # These files do not need to be formatted,
     # see .flake8 for more details
-    "python/paddle/fluid/[!t]**",
-    "python/paddle/fluid/tra**",
+    "python/paddle/fluid/**",
     "python/paddle/utils/gast/**",
+    # Temporarily ignore CINN files, it will fix later
+    "python/cinn/**",
+    "test/cinn/**",
 ]

 [tool.ruff]
 exclude = [
     "./build",
-    "./python/paddle/fluid/[!t]**",
-    "./python/paddle/fluid/tra**",
+    "third_party",
+    "./python/paddle/fluid/**",
     "./python/paddle/utils/gast/**",
+    # Temporarily ignore CINN files, it will fix later
+    "python/cinn/**",
+    "test/cinn/**",
 ]
 target-version = "py37"
 select = [
     # Pyflakes
-    "F401",
+    "F",
     # Comprehensions
     "C4",
@@ -60,17 +70,9 @@ select = [
     "B032",
     # Pylint
+    "PLE",
     "PLC0414",
     "PLC3002",
-    "PLE0100",
-    "PLE0101",
-    "PLE0604",
-    "PLE0605",
-    "PLE1142",
-    "PLE1205",
-    "PLE1206",
-    "PLE1307",
-    "PLE2502",
     "PLR0206",
     "PLR0402",
 ]
@@ -78,6 +80,10 @@ unfixable = [
     "NPY001"
 ]
 ignore = [
+    # `name` may be undefined, or defined from star imports: `module`
+    "F405",
+    # Local variable name is assigned to but never used
+    "F841",
     # It not met the "Explicit is better than implicit" rule
     "UP015",
     # It will cause the performance regression on python3.10
@@ -87,9 +93,13 @@ ignore = [
 [tool.ruff.per-file-ignores]
 # Ignore unused imports in __init__.py
 "__init__.py" = ["F401"]
+# Ignore undefined variables in CMake config and some dygraph_to_static tests
+".cmake-format.py" = ["F821"]
+"test/dygraph_to_static/test_closure_analysis.py" = ["F821"]
+"python/paddle/static/amp/decorator.py" = ["F821"]
 # Ignore version check in setup.py
 "setup.py" = ["UP036"]
 # Ignore unnecessary comprehension in dy2st unittest test_loop
-"test/dygraph_to_static/test_loop.py" = ["C416"]
+"test/dygraph_to_static/test_loop.py" = ["C416", "F821"]
 # Ignore unnecessary lambda in dy2st unittest test_lambda
 "test/dygraph_to_static/test_lambda.py" = ["PLC3002"]
@@ -242,7 +242,7 @@ class ProgramHelper:
         # NOTE(dev): Because @to_static is a Lazy mechanism, so we explicitly call this to trigger
         # generating Program IR immediately.
-        getattr(self.proxy_layer, func_name).concrete_program
+        getattr(self.proxy_layer, func_name).concrete_program  # noqa: B018
         self._build_startup_program()
......
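The `# noqa: B018` added above (and in the dygraph-to-static tests further down) suppresses flake8-bugbear's "useless expression" check, which the newer ruff raises for a statement consisting of a bare attribute access, even when the access is evaluated deliberately for its side effect. A minimal sketch of the situation, using a stand-in class instead of the real @to_static machinery:

class LazyProgram:
    built = False

    @property
    def concrete_program(self):
        # Stand-in for the real property, which triggers program translation
        # as a side effect of being read.
        self.built = True
        return "<program>"


layer = LazyProgram()
layer.concrete_program  # noqa: B018 -- evaluated only for its side effect
assert layer.built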
...@@ -204,15 +204,12 @@ class HybridCommunicateGroup: ...@@ -204,15 +204,12 @@ class HybridCommunicateGroup:
self._dp_degree, self._dp_degree,
) )
) )
debug_str += ( debug_str += ", mp_group: {}, sharding_group: {}, pp_group: {}, dp_group: {}, check/clip group: {}".format(
", mp_group: %s, sharding_group: %s, pp_group: %s, dp_group: %s, check/clip group: %s" self._mp_group,
% ( self._sharding_group,
self._mp_group, self._pp_group,
self._sharding_group, self._dp_group,
self._pp_group, self._check_group,
self._dp_group,
self._check_group,
)
) )
logger.info(debug_str) logger.info(debug_str)
......
@@ -583,7 +583,7 @@ class UtilBase:
         global_block._remove_op(index)
         # if fetch_list have lod tensor
-        return_numpy = all([v.lod_level == 0 for v in fetch_list])
+        return_numpy = all(v.lod_level == 0 for v in fetch_list)
         # try dump fetch_targets
         feed_tensors = []
......
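The `all([... for ...])` to `all(... for ...)` rewrites here and throughout the rest of the diff come from the flake8-comprehensions rules ("C4") selected in the ruff config: passing a generator expression instead of a list comprehension avoids building an intermediate list and lets any()/all() short-circuit. A tiny self-contained example of the behavioural difference:

def is_non_negative(v, seen):
    seen.append(v)
    return v >= 0


values = [3, -1, 7, 9]
seen_list, seen_gen = [], []

all([is_non_negative(v, seen_list) for v in values])  # list: every element is visited
all(is_non_negative(v, seen_gen) for v in values)     # generator: stops at -1

assert len(seen_list) == 4 and len(seen_gen) == 2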
...@@ -366,8 +366,7 @@ class MultiSlotDataGenerator(DataGenerator): ...@@ -366,8 +366,7 @@ class MultiSlotDataGenerator(DataGenerator):
) )
if name != self._proto_info[index][0]: if name != self._proto_info[index][0]:
raise ValueError( raise ValueError(
"the field name of two given line are not match: require<%s>, get<%s>." f"the field name of two given line are not match: require<{self._proto_info[index][0]}>, get<{name}>."
% (self._proto_info[index][0], name)
) )
if output: if output:
output += " " output += " "
......
...@@ -307,8 +307,7 @@ class DatasetBase: ...@@ -307,8 +307,7 @@ class DatasetBase:
data_gen_len = len(user_parsed_line) data_gen_len = len(user_parsed_line)
if var_len != data_gen_len: if var_len != data_gen_len:
raise ValueError( raise ValueError(
"var length mismatch error: var_list = %s vs data_generator = %s" f"var length mismatch error: var_list = {var_len} vs data_generator = {data_gen_len}"
% (var_len, data_gen_len)
) )
for i, ele in enumerate(user_parsed_line): for i, ele in enumerate(user_parsed_line):
...@@ -324,10 +323,11 @@ class DatasetBase: ...@@ -324,10 +323,11 @@ class DatasetBase:
isinstance(ele, float) for ele in ele[1] isinstance(ele, float) for ele in ele[1]
): ):
raise TypeError( raise TypeError(
"var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-float value, which is %s \n" "var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-float value, which is {} \n"
"Please check if order of var_list and data_generator are aligned. \n" "Please check if order of var_list and data_generator are aligned. \n"
"Please check if var's type in data_generator is correct." "Please check if var's type in data_generator is correct.".format(
% (ele[0], "float", ele[1]) ele[0], "float", ele[1]
)
) )
if ( if (
...@@ -335,10 +335,11 @@ class DatasetBase: ...@@ -335,10 +335,11 @@ class DatasetBase:
or var_list[i].dtype == core.VarDesc.VarType.INT32 or var_list[i].dtype == core.VarDesc.VarType.INT32
) and not all(isinstance(ele, int) for ele in ele[1]): ) and not all(isinstance(ele, int) for ele in ele[1]):
raise TypeError( raise TypeError(
"var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-int value, which is %s \n" "var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-int value, which is {} \n"
"Please check if order of var_list and data_generator are aligned. \n" "Please check if order of var_list and data_generator are aligned. \n"
"Please check if var's type in data_generator is correct." "Please check if var's type in data_generator is correct.".format(
% (ele[0], "int", ele[1]) ele[0], "int", ele[1]
)
) )
else: else:
......
...@@ -695,8 +695,9 @@ def get_gpus(gpus): ...@@ -695,8 +695,9 @@ def get_gpus(gpus):
for x in gpus.split(','): for x in gpus.split(','):
assert x in cuda_visible_devices_list, ( assert x in cuda_visible_devices_list, (
"Can't find " "Can't find "
"your gpus %s in CUDA_VISIBLE_DEVICES[%s]." "your gpus {} in CUDA_VISIBLE_DEVICES[{}].".format(
% (x, cuda_visible_devices) x, cuda_visible_devices
)
) )
res_gpus = [ res_gpus = [
cuda_visible_devices_list.index(x.strip()) cuda_visible_devices_list.index(x.strip())
...@@ -1485,10 +1486,9 @@ class ParameterServerLauncher: ...@@ -1485,10 +1486,9 @@ class ParameterServerLauncher:
else: else:
self.current_node_ip = pod_ip self.current_node_ip = pod_ip
if not self.distribute_mode == DistributeMode.PS_HETER: if not self.distribute_mode == DistributeMode.PS_HETER:
assert self.current_node_ip in self.node_ips, ( assert (
"Can't find your local ip {%s} in args.servers and args.workers ips: {%s}" self.current_node_ip in self.node_ips
% (self.current_node_ip, self.node_ips) ), f"Can't find your local ip {{{self.current_node_ip}}} in args.servers and args.workers ips: {{{self.node_ips}}}"
)
if self.current_node_ip in self.node_ips: if self.current_node_ip in self.node_ips:
self.node_rank = self.node_ips.index(self.current_node_ip) self.node_rank = self.node_ips.index(self.current_node_ip)
logger.debug( logger.debug(
......
...@@ -69,9 +69,8 @@ def initialize_p2p_groups( ...@@ -69,9 +69,8 @@ def initialize_p2p_groups(
) = _hcg.get_p2p_groups() ) = _hcg.get_p2p_groups()
debug_str = ( debug_str = (
"P2pInfo: send_next_group: %s, send_prev_group: %s, " "P2pInfo: send_next_group: {}, send_prev_group: {}, "
"recv_next_group: %s, recv_prev_group: %s" "recv_next_group: {}, recv_prev_group: {}".format(
% (
repr(send_next_group), repr(send_next_group),
repr(send_prev_group), repr(send_prev_group),
repr(recv_next_group), repr(recv_next_group),
......
...@@ -79,10 +79,8 @@ class GroupShardedStage2(nn.Layer): ...@@ -79,10 +79,8 @@ class GroupShardedStage2(nn.Layer):
else sharding_optimizer else sharding_optimizer
) )
assert all( assert all(
[ isinstance(opt, GroupShardedOptimizerStage2)
isinstance(opt, GroupShardedOptimizerStage2) for opt in self._sharding_optimizers
for opt in self._sharding_optimizers
]
), "Please use GroupShardedOptimizerStage2 optimizer" ), "Please use GroupShardedOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable self._auto_refresh_trainable = auto_refresh_trainable
......
...@@ -124,7 +124,7 @@ class ParamStorage(InternalStorage): ...@@ -124,7 +124,7 @@ class ParamStorage(InternalStorage):
""" """
assert all( assert all(
[id(param) not in self._param_ids for param in trainable_params] id(param) not in self._param_ids for param in trainable_params
), "The same param cannot be checked in twice" ), "The same param cannot be checked in twice"
assert self.buffer is not None assert self.buffer is not None
......
...@@ -154,16 +154,12 @@ def print_metric(metric_ptr, name): ...@@ -154,16 +154,12 @@ def print_metric(metric_ptr, name):
""" """
if name.find("wuauc") != -1: if name.find("wuauc") != -1:
metric = metric_ptr.get_wuauc_metric_msg(name) metric = metric_ptr.get_wuauc_metric_msg(name)
monitor_msg = ( monitor_msg = f"{name}: User Count={metric[0]:.0f} INS Count={metric[1]:.0f} UAUC={metric[4]:.6f} WUAUC={metric[5]:.6f} "
"%s: User Count=%.0f INS Count=%.0f UAUC=%.6f WUAUC=%.6f "
% (name, metric[0], metric[1], metric[4], metric[5])
)
else: else:
metric = metric_ptr.get_metric_msg(name) metric = metric_ptr.get_metric_msg(name)
monitor_msg = ( monitor_msg = (
"%s: AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f RMSE=%.6f " "{}: AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} RMSE={:.6f} "
"Actual CTR=%.6f Predicted CTR=%.6f COPC=%.6f INS Count=%.0f" "Actual CTR={:.6f} Predicted CTR={:.6f} COPC={:.6f} INS Count={:.0f}".format(
% (
name, name,
metric[0], metric[0],
metric[1], metric[1],
......
...@@ -425,8 +425,7 @@ class DataParallel(layers.Layer): ...@@ -425,8 +425,7 @@ class DataParallel(layers.Layer):
params_set.add(param) params_set.add(param)
if not isinstance(param, self.var_dtype): if not isinstance(param, self.var_dtype):
raise TypeError( raise TypeError(
"The data type of '%s' must be '%s'" f"The data type of '{param.name}' must be '{self.var_dtype}'"
% (param.name, self.var_dtype)
) )
if param.trainable: if param.trainable:
layers_param.append((sublayer, param)) layers_param.append((sublayer, param))
......
...@@ -55,7 +55,7 @@ class RecomputeState(ProgramStats): ...@@ -55,7 +55,7 @@ class RecomputeState(ProgramStats):
return self._reserved_vars return self._reserved_vars
def is_recompute(self): def is_recompute(self):
return any([is_recompute_op(op) for op in self.ops]) return any(is_recompute_op(op) for op in self.ops)
def build_states(self): def build_states(self):
for i, op in enumerate(self.ops): for i, op in enumerate(self.ops):
......
...@@ -177,7 +177,7 @@ def find_all_fuse_all_reduce_groups(block): ...@@ -177,7 +177,7 @@ def find_all_fuse_all_reduce_groups(block):
if in_var.type != core.VarDesc.VarType.LOD_TENSOR: if in_var.type != core.VarDesc.VarType.LOD_TENSOR:
return False return False
shape = in_var.shape shape = in_var.shape
if any([s <= 0 for s in shape]): if any(s <= 0 for s in shape):
return False return False
return True return True
......
...@@ -85,7 +85,7 @@ class PassBase(ABC): ...@@ -85,7 +85,7 @@ class PassBase(ABC):
def _check_conflict_including_common_rules(self, other_pass): def _check_conflict_including_common_rules(self, other_pass):
return self._check_conflict(other_pass) and all( return self._check_conflict(other_pass) and all(
[r(other_pass, self) for r in PassBase._COMMON_RULES] r(other_pass, self) for r in PassBase._COMMON_RULES
) )
def apply(self, main_programs, startup_programs, context=None): def apply(self, main_programs, startup_programs, context=None):
...@@ -96,10 +96,8 @@ class PassBase(ABC): ...@@ -96,10 +96,8 @@ class PassBase(ABC):
return context return context
if not all( if not all(
[ self._check_conflict_including_common_rules(p)
self._check_conflict_including_common_rules(p) for p in context.passes
for p in context.passes
]
): ):
return context return context
...@@ -325,10 +323,8 @@ def _solve_pass_conflict(passes, context): ...@@ -325,10 +323,8 @@ def _solve_pass_conflict(passes, context):
passes = [] passes = []
for p in old_passes: for p in old_passes:
if all( if all(
[ p._check_conflict_including_common_rules(applied_p)
p._check_conflict_including_common_rules(applied_p) for applied_p in context.passes
for applied_p in context.passes
]
): ):
passes.append(p) passes.append(p)
......
...@@ -211,9 +211,10 @@ def _get_subprocess_env_list(nprocs, options): ...@@ -211,9 +211,10 @@ def _get_subprocess_env_list(nprocs, options):
for card_id in selected_device_list: for card_id in selected_device_list:
if card_id not in env_devices_list: if card_id not in env_devices_list:
raise ValueError( raise ValueError(
"The selected gpu card %s cannot found in " "The selected gpu card {} cannot found in "
"CUDA_VISIBLE_DEVICES (%s)." "CUDA_VISIBLE_DEVICES ({}).".format(
% (card_id, ",".join(env_devices_list)) card_id, ",".join(env_devices_list)
)
) )
elif options['backend'] == 'bkcl': elif options['backend'] == 'bkcl':
...@@ -251,9 +252,10 @@ def _get_subprocess_env_list(nprocs, options): ...@@ -251,9 +252,10 @@ def _get_subprocess_env_list(nprocs, options):
for card_id in selected_device_list: for card_id in selected_device_list:
if card_id not in env_devices_list: if card_id not in env_devices_list:
raise ValueError( raise ValueError(
"The selected xpu card %s cannot found in " "The selected xpu card {} cannot found in "
"XPU_VISIBLE_DEVICES (%s)." "XPU_VISIBLE_DEVICES ({}).".format(
% (card_id, ",".join(env_devices_list)) card_id, ",".join(env_devices_list)
)
) )
elif options['backend'] == 'gloo': elif options['backend'] == 'gloo':
# TODO check gpu / xpu flag must not exist # TODO check gpu / xpu flag must not exist
......
...@@ -82,8 +82,9 @@ def get_gpus(selected_gpus): ...@@ -82,8 +82,9 @@ def get_gpus(selected_gpus):
for x in selected_gpus.split(','): for x in selected_gpus.split(','):
assert x in cuda_visible_devices_list, ( assert x in cuda_visible_devices_list, (
"Can't find " "Can't find "
"your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]." "your selected_gpus {} in CUDA_VISIBLE_DEVICES[{}].".format(
% (x, cuda_visible_devices) x, cuda_visible_devices
)
) )
gpus = [ gpus = [
cuda_visible_devices_list.index(x.strip()) cuda_visible_devices_list.index(x.strip())
......
...@@ -1894,7 +1894,7 @@ class Model: ...@@ -1894,7 +1894,7 @@ class Model:
assert train_data is not None, "train_data must be given!" assert train_data is not None, "train_data must be given!"
if isinstance(batch_size, (tuple, list)) and all( if isinstance(batch_size, (tuple, list)) and all(
[isinstance(x, int) for x in batch_size] isinstance(x, int) for x in batch_size
): ):
assert ( assert (
len(batch_size) == 2 len(batch_size) == 2
......
...@@ -1640,10 +1640,9 @@ class FleetUtil: ...@@ -1640,10 +1640,9 @@ class FleetUtil:
total_ins_num_name, total_ins_num_name,
) )
self.rank0_print( self.rank0_print(
"%s global AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f " "{} global AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} "
"RMSE=%.6f Actural_CTR=%.6f Predicted_CTR=%.6f " "RMSE={:.6f} Actural_CTR={:.6f} Predicted_CTR={:.6f} "
"COPC=%.6f MEAN Q_VALUE=%.6f Ins number=%s" "COPC={:.6f} MEAN Q_VALUE={:.6f} Ins number={}".format(
% (
print_prefix, print_prefix,
auc, auc,
bucket_error, bucket_error,
......
...@@ -76,8 +76,7 @@ class DownpourServer(Server): ...@@ -76,8 +76,7 @@ class DownpourServer(Server):
return return
else: else:
raise ValueError( raise ValueError(
"expect table %s type=%s, but actual type=%s" f"expect table {table_id} type={pslib.PS_SPARSE_TABLE}, but actual type={table.type}"
% (table_id, pslib.PS_SPARSE_TABLE, table.type)
) )
if strategy is None: if strategy is None:
strategy = {} strategy = {}
...@@ -388,8 +387,7 @@ class DownpourServer(Server): ...@@ -388,8 +387,7 @@ class DownpourServer(Server):
return return
else: else:
raise ValueError( raise ValueError(
"expect table %s type=%s, but actual type=%s" f"expect table {table_id} type={pslib.PS_DENSE_TABLE}, but actual type={table.type}"
% (table_id, pslib.PS_DENSE_TABLE, table.type)
) )
if strategy is None: if strategy is None:
...@@ -480,8 +478,7 @@ class DownpourServer(Server): ...@@ -480,8 +478,7 @@ class DownpourServer(Server):
return return
else: else:
raise ValueError( raise ValueError(
"expect table %s type=%s, but actual type=%s" f"expect table {table_id} type={pslib.PS_DENSE_TABLE}, but actual type={table.type}"
% (table_id, pslib.PS_DENSE_TABLE, table.type)
) )
if strategy is None: if strategy is None:
strategy = {} strategy = {}
......
...@@ -277,8 +277,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -277,8 +277,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
def _check_params_grads(self, params, grads): def _check_params_grads(self, params, grads):
if len(params) != len(grads): if len(params) != len(grads):
raise ValueError( raise ValueError(
"params size != grads size, %s vs %s" f"params size != grads size, {len(params)} vs {len(grads)}"
% (len(params), len(grads))
) )
pname2grad = {} pname2grad = {}
...@@ -353,8 +352,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -353,8 +352,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
d_size[table_name] = emb_size d_size[table_name] = emb_size
elif d_size[table_name] != emb_size: elif d_size[table_name] != emb_size:
raise ValueError( raise ValueError(
"embedding size error: %s vs %s" f"embedding size error: {emb_size} vs {d_size[table_name]}"
% (emb_size, d_size[table_name])
) )
return d_size return d_size
...@@ -384,9 +382,10 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -384,9 +382,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[table_name] - 3 and st["sparse_embedx_dim"] != emb_to_size[table_name] - 3
): ):
raise ValueError( raise ValueError(
"fleet config sparse_embedx_dim=%s not" "fleet config sparse_embedx_dim={} not"
" equal to embedding dim - 3 = %s" " equal to embedding dim - 3 = {}".format(
% (st["sparse_embedx_dim"], emb_to_size[table_name] - 3) st["sparse_embedx_dim"], emb_to_size[table_name] - 3
)
) )
if ( if (
st.get("sparse_embedx_dim") is not None st.get("sparse_embedx_dim") is not None
...@@ -394,9 +393,10 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -394,9 +393,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[table_name] - 1 and st["sparse_embedx_dim"] != emb_to_size[table_name] - 1
): ):
raise ValueError( raise ValueError(
"fleet config sparse_embedx_dim=%s not" "fleet config sparse_embedx_dim={} not"
" equal to embedding dim - 1 = %s" " equal to embedding dim - 1 = {}".format(
% (st["sparse_embedx_dim"], emb_to_size[table_name] - 1) st["sparse_embedx_dim"], emb_to_size[table_name] - 1
)
) )
if ( if (
st.get("sparse_embedx_dim") is None st.get("sparse_embedx_dim") is None
...@@ -432,9 +432,10 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -432,9 +432,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[table_name] and st["sparse_embedx_dim"] != emb_to_size[table_name]
): ):
raise ValueError( raise ValueError(
"fleet config sparse_embedx_dim=%s not" "fleet config sparse_embedx_dim={} not"
" equal to embedding dim = %s" " equal to embedding dim = {}".format(
% (st["sparse_embedx_dim"], emb_to_size[table_name]) st["sparse_embedx_dim"], emb_to_size[table_name]
)
) )
if st.get("sparse_embedx_dim") is None: if st.get("sparse_embedx_dim") is None:
logger.warning( logger.warning(
...@@ -603,8 +604,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -603,8 +604,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
else: else:
if len(ps_param.trainer_param) != len(prog_id_to_worker): if len(ps_param.trainer_param) != len(prog_id_to_worker):
raise ValueError( raise ValueError(
"trainer param size != program size, %s vs %s" f"trainer param size != program size, {len(ps_param.trainer_param)} vs {len(prog_id_to_worker)}"
% (len(ps_param.trainer_param), len(prog_id_to_worker))
) )
idx = 0 idx = 0
# prog_id_to_worker is OrderedDict # prog_id_to_worker is OrderedDict
...@@ -682,9 +682,10 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -682,9 +682,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[key] - 3 and st["sparse_embedx_dim"] != emb_to_size[key] - 3
): ):
raise ValueError( raise ValueError(
"fleet config sparse_embedx_dim=%s not" "fleet config sparse_embedx_dim={} not"
" equal to embedding size - 3 = %s" " equal to embedding size - 3 = {}".format(
% (st["sparse_embedx_dim"], emb_to_size[key] - 3) st["sparse_embedx_dim"], emb_to_size[key] - 3
)
) )
st["sparse_embedx_dim"] = emb_to_size[key] - 3 st["sparse_embedx_dim"] = emb_to_size[key] - 3
elif accessor == "DownpourSparseValueAccessor": elif accessor == "DownpourSparseValueAccessor":
...@@ -693,9 +694,10 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -693,9 +694,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[key] and st["sparse_embedx_dim"] != emb_to_size[key]
): ):
raise ValueError( raise ValueError(
"fleet config sparse_embedx_dim=%s not" "fleet config sparse_embedx_dim={} not"
" equal to embedding size = %s" " equal to embedding size = {}".format(
% (st["sparse_embedx_dim"], emb_to_size[key]) st["sparse_embedx_dim"], emb_to_size[key]
)
) )
st["sparse_embedx_dim"] = emb_to_size[key] st["sparse_embedx_dim"] = emb_to_size[key]
......
...@@ -321,7 +321,7 @@ def try_load_model_vars( ...@@ -321,7 +321,7 @@ def try_load_model_vars(
global_block._remove_op(index) global_block._remove_op(index)
# if fetch_list have lod tensor # if fetch_list have lod tensor
return_numpy = all([v.lod_level == 0 for v in fetch_list]) return_numpy = all(v.lod_level == 0 for v in fetch_list)
# try dump fetch_targets # try dump fetch_targets
feed_tensors = [] feed_tensors = []
......
...@@ -108,7 +108,7 @@ class PaddedSeqLenInfo(SeqLenInfo): ...@@ -108,7 +108,7 @@ class PaddedSeqLenInfo(SeqLenInfo):
@classmethod @classmethod
def from_seqlens_padded(cls, seqlens, padding): def from_seqlens_padded(cls, seqlens, padding):
assert all([seqlen <= padding for seqlen in seqlens]) assert all(seqlen <= padding for seqlen in seqlens)
seqstart_py = list(range(0, len(seqlens) * padding + 1, padding)) seqstart_py = list(range(0, len(seqlens) * padding + 1, padding))
return cls( return cls(
seqlen=paddle.to_tensor(seqlens, dtype=paddle.int32), seqlen=paddle.to_tensor(seqlens, dtype=paddle.int32),
......
...@@ -273,7 +273,7 @@ class TensorDataset(Dataset): ...@@ -273,7 +273,7 @@ class TensorDataset(Dataset):
"TensorDataset con only be used in imperative mode" "TensorDataset con only be used in imperative mode"
) )
assert all( assert all(
[tensor.shape[0] == tensors[0].shape[0] for tensor in tensors] tensor.shape[0] == tensors[0].shape[0] for tensor in tensors
), "tensors not have same shape of the 1st dimension" ), "tensors not have same shape of the 1st dimension"
self.tensors = tensors self.tensors = tensors
......
...@@ -599,7 +599,7 @@ def convert_shape(x): ...@@ -599,7 +599,7 @@ def convert_shape(x):
""" """
def has_negative(list_shape): def has_negative(list_shape):
return any([x < 0 for x in list_shape]) return any(x < 0 for x in list_shape)
# When `x` is Variable: # When `x` is Variable:
# (1) if x.shape contains -1, such as [2, -1, 64], returns [2, var, 64], # (1) if x.shape contains -1, such as [2, -1, 64], returns [2, var, 64],
......
...@@ -583,10 +583,8 @@ class PartialProgramLayer: ...@@ -583,10 +583,8 @@ class PartialProgramLayer:
filter( filter(
lambda x: x[0] >= start_idx lambda x: x[0] >= start_idx
and any( and any(
[ out_arg == var_grad_name
out_arg == var_grad_name for out_arg in x[1].output_arg_names
for out_arg in x[1].output_arg_names
]
), ),
enumerate(target_program.block(0).ops), enumerate(target_program.block(0).ops),
) )
......
...@@ -102,7 +102,7 @@ def _update_padding_nd(padding, channel_last, num_dims): ...@@ -102,7 +102,7 @@ def _update_padding_nd(padding, channel_last, num_dims):
else: else:
padding_algorithm = "EXPLICIT" padding_algorithm = "EXPLICIT"
padding = convert_to_list(padding, num_dims, 'padding') padding = convert_to_list(padding, num_dims, 'padding')
if not all([p >= 0 for p in padding]): if not all(p >= 0 for p in padding):
raise ValueError( raise ValueError(
"Invalid padding, all value should be larger than or equal to 0, but received: {}".format( "Invalid padding, all value should be larger than or equal to 0, but received: {}".format(
padding padding
......
...@@ -749,7 +749,7 @@ def max_unpool1d( ...@@ -749,7 +749,7 @@ def max_unpool1d(
This API implements max unpooling 1d opereation. This API implements max unpooling 1d opereation.
`max_unpool1d` accepts the output of `max_pool1d` as input, `max_unpool1d` accepts the output of `max_pool1d` as input,
including the indices of the maximum value and calculate the partial inverse. including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero. All non-maximum values are set to zero.
- Input: :math:`(N, C, L_{in})` - Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where - Output: :math:`(N, C, L_{out})`, where
...@@ -1025,7 +1025,7 @@ def max_unpool3d( ...@@ -1025,7 +1025,7 @@ def max_unpool3d(
This API implements max unpooling 3d opereation. This API implements max unpooling 3d opereation.
`max_unpool3d` accepts the output of `max_pool3d` as input, `max_unpool3d` accepts the output of `max_pool3d` as input,
including the indices of the maximum value and calculate the partial inverse. including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero. All non-maximum values are set to zero.
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
......
...@@ -1123,7 +1123,7 @@ class MaxUnPool1D(Layer): ...@@ -1123,7 +1123,7 @@ class MaxUnPool1D(Layer):
`max_unpool1d` accepts the output of `max_pool1d` as input, `max_unpool1d` accepts the output of `max_pool1d` as input,
including the indices of the maximum value and calculate the partial inverse. including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero. All non-maximum values are set to zero.
- Input: :math:`(N, C, L_{in})` - Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where - Output: :math:`(N, C, L_{out})`, where
...@@ -1207,7 +1207,7 @@ class MaxUnPool2D(Layer): ...@@ -1207,7 +1207,7 @@ class MaxUnPool2D(Layer):
'max_unpool2d' accepts the output of 'max_unpool2d' as input 'max_unpool2d' accepts the output of 'max_unpool2d' as input
Including the indices of the maximum value and calculating the partial inverse Including the indices of the maximum value and calculating the partial inverse
All non-maximum values ​​are set to zero. All non-maximum values are set to zero.
Parameters: Parameters:
...@@ -1295,7 +1295,7 @@ class MaxUnPool3D(Layer): ...@@ -1295,7 +1295,7 @@ class MaxUnPool3D(Layer):
`max_unpool3d` accepts the output of `max_pool3d` as input, `max_unpool3d` accepts the output of `max_pool3d` as input,
including the indices of the maximum value and calculate the partial inverse. including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero. All non-maximum values are set to zero.
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
......
...@@ -1014,10 +1014,8 @@ class MultiStepDecay(LRScheduler): ...@@ -1014,10 +1014,8 @@ class MultiStepDecay(LRScheduler):
) )
if not all( if not all(
[ milestones[i] < milestones[i + 1]
milestones[i] < milestones[i + 1] for i in range(len(milestones) - 1)
for i in range(len(milestones) - 1)
]
): ):
raise ValueError('The elements of milestones must be incremented') raise ValueError('The elements of milestones must be incremented')
if gamma >= 1.0: if gamma >= 1.0:
......
...@@ -452,10 +452,8 @@ class DistributedSummary: ...@@ -452,10 +452,8 @@ class DistributedSummary:
# case 2: TracerEventType is Operator but is communication op # case 2: TracerEventType is Operator but is communication op
elif hostnode.type == TracerEventType.Operator and any( elif hostnode.type == TracerEventType.Operator and any(
[ name in hostnode.name.lower()
name in hostnode.name.lower() for name in _CommunicationOpName
for name in _CommunicationOpName
]
): ):
self.cpu_communication_range.append( self.cpu_communication_range.append(
(hostnode.start_ns, hostnode.end_ns) (hostnode.start_ns, hostnode.end_ns)
......
...@@ -804,7 +804,7 @@ def decorate( ...@@ -804,7 +804,7 @@ def decorate(
@overload(key=FunctionType.COMMON) @overload(key=FunctionType.COMMON)
def decorate( def decorate( # noqa: F811
optimizer, optimizer,
amp_lists=None, amp_lists=None,
level='O1', level='O1',
......
...@@ -75,7 +75,7 @@ def _check_args(caller, args, supported_args=None, deprecated_args=None): ...@@ -75,7 +75,7 @@ def _check_args(caller, args, supported_args=None, deprecated_args=None):
def _check_vars(name, var_list): def _check_vars(name, var_list):
if not isinstance(var_list, list): if not isinstance(var_list, list):
var_list = [var_list] var_list = [var_list]
if not all([isinstance(var, Variable) for var in var_list]): if not all(isinstance(var, Variable) for var in var_list):
raise ValueError( raise ValueError(
f"'{name}' should be a Variable or a list of Variable." f"'{name}' should be a Variable or a list of Variable."
) )
......
...@@ -473,7 +473,7 @@ def data_norm( ...@@ -473,7 +473,7 @@ def data_norm(
Args: Args:
input (Tensor): The input Tensor. input (Tensor): The input Tensor.
act (str, optional): Activation type, linear|relu|prelu|... Default: None. act (str, optional): Activation type, linear|relu|prelu|... Default: None.
epsilon(float, optional): Whether to add small values ​in​to the variance during calculations epsilon(float, optional): Whether to add small values into the variance during calculations
to prevent division by zero. Default: 1e-05. to prevent division by zero. Default: 1e-05.
param_attr (ParamAttr, optional): The parameter attribute for Parameter `scale`. Default: None. param_attr (ParamAttr, optional): The parameter attribute for Parameter `scale`. Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output data_layout (str, optional): Specify the data format of the input, and the data format of the output
...@@ -1243,8 +1243,9 @@ def conv3d( ...@@ -1243,8 +1243,9 @@ def conv3d(
if num_channels % groups != 0: if num_channels % groups != 0:
raise ValueError( raise ValueError(
"The number of input channels must be divisible by Attr(groups). " "The number of input channels must be divisible by Attr(groups). "
"Received: number of channels(%s), groups(%s)." "Received: number of channels({}), groups({}).".format(
% (str(num_channels), str(groups)) str(num_channels), str(groups)
)
) )
num_filter_channels = num_channels // groups num_filter_channels = num_channels // groups
......
...@@ -2108,11 +2108,9 @@ def assign(x, output=None): ...@@ -2108,11 +2108,9 @@ def assign(x, output=None):
if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input): if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
# We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types. # We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types.
if not all( if not all(
[ x.shape == (1,)
x.shape == (1,) for x in input
for x in input if isinstance(x, (Variable, core.eager.Tensor))
if isinstance(x, (Variable, core.eager.Tensor))
]
): ):
raise TypeError( raise TypeError(
"Unsupport paddle.assign([Variable, Variable...]) with non-scalar variable." "Unsupport paddle.assign([Variable, Variable...]) with non-scalar variable."
......
...@@ -78,8 +78,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): ...@@ -78,8 +78,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if binary_op and x.dtype != y.dtype: if binary_op and x.dtype != y.dtype:
raise ValueError( raise ValueError(
"(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." f"(InvalidArgument) The DataType of {op_name} Op's Variable must be consistent, but received {x.dtype} and {y.dtype}."
% (op_name, x.dtype, y.dtype)
) )
if out is None: if out is None:
......
...@@ -1851,8 +1851,7 @@ def stack(x, axis=0, name=None): ...@@ -1851,8 +1851,7 @@ def stack(x, axis=0, name=None):
x = [x] x = [x]
else: else:
raise TypeError( raise TypeError(
"The type of '%s' in %s must be %s, but received %s" "The type of '{}' in {} must be {}, but received {}".format(
% (
'x', 'x',
'stack', 'stack',
'list[Tensor], tuple[Tensor] or TensorArray', 'list[Tensor], tuple[Tensor] or TensorArray',
......
...@@ -956,8 +956,7 @@ def multiply(x, y, name=None): ...@@ -956,8 +956,7 @@ def multiply(x, y, name=None):
else: else:
if x.dtype != y.dtype: if x.dtype != y.dtype:
raise TypeError( raise TypeError(
'Input tensors must be same type, but received type of x: %s, type of y: %s ' f'Input tensors must be same type, but received type of x: {x.dtype}, type of y: {y.dtype} '
% (x.dtype, y.dtype)
) )
return _elementwise_op(LayerHelper('elementwise_mul', **locals())) return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
...@@ -1891,8 +1890,9 @@ def mm(input, mat2, name=None): ...@@ -1891,8 +1890,9 @@ def mm(input, mat2, name=None):
raise ValueError( raise ValueError(
"After performing an optional transpose, Input X's width should be " "After performing an optional transpose, Input X's width should be "
"equal to Y's width for multiplication " "equal to Y's width for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n" "prerequisites. But received X's shape: {}, Y's shape: {}\n".format(
% (x_shape, y_shape) x_shape, y_shape
)
) )
if len(y_shape) > 2 and len(x_shape) > 2: if len(y_shape) > 2 and len(x_shape) > 2:
...@@ -2156,8 +2156,9 @@ def inner(x, y, name=None): ...@@ -2156,8 +2156,9 @@ def inner(x, y, name=None):
raise ValueError( raise ValueError(
"After performing an optional transpose, Input X's last dim should be " "After performing an optional transpose, Input X's last dim should be "
"equal to Y's last dim for multiplication " "equal to Y's last dim for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n" "prerequisites. But received X's shape: {}, Y's shape: {}\n".format(
% (x_shape, y_shape) x_shape, y_shape
)
) )
__check_input(nx, ny) __check_input(nx, ny)
......
...@@ -716,7 +716,7 @@ class BuildExtension(build_ext): ...@@ -716,7 +716,7 @@ class BuildExtension(build_ext):
for i, extension in enumerate(self.extensions): for i, extension in enumerate(self.extensions):
sources = [os.path.abspath(s) for s in extension.sources] sources = [os.path.abspath(s) for s in extension.sources]
if not self.contain_cuda_file: if not self.contain_cuda_file:
self.contain_cuda_file = any([is_cuda_file(s) for s in sources]) self.contain_cuda_file = any(is_cuda_file(s) for s in sources)
op_names = parse_op_name_from(sources) op_names = parse_op_name_from(sources)
for op_name in op_names: for op_name in op_names:
......
...@@ -1267,7 +1267,7 @@ def _write_setup_file( ...@@ -1267,7 +1267,7 @@ def _write_setup_file(
).lstrip() ).lstrip()
with_cuda = False with_cuda = False
if any([is_cuda_file(source) for source in sources]): if any(is_cuda_file(source) for source in sources):
with_cuda = True with_cuda = True
log_v(f"with_cuda: {with_cuda}", verbose) log_v(f"with_cuda: {with_cuda}", verbose)
......
...@@ -300,8 +300,9 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): ...@@ -300,8 +300,9 @@ def _recursive_assert_same_structure(nest1, nest2, check_types):
if type_nest1 != type_nest2: if type_nest1 != type_nest2:
raise TypeError( raise TypeError(
"The two structures don't have the same sequence type. First " "The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s." "structure has type {}, while second structure has type {}.".format(
% (type_nest1, type_nest2) type_nest1, type_nest2
)
) )
if isinstance(nest1, dict): if isinstance(nest1, dict):
keys1 = set(nest1.keys()) keys1 = set(nest1.keys())
......
...@@ -49,8 +49,7 @@ else: ...@@ -49,8 +49,7 @@ else:
sys.version_info.minor sys.version_info.minor
): ):
raise RuntimeError( raise RuntimeError(
"You set PY_VERSION=%s, but your current python environment is %s, you should keep them consistent!" "You set PY_VERSION={}, but your current python environment is {}, you should keep them consistent!".format(
% (
os.getenv("PY_VERSION"), os.getenv("PY_VERSION"),
str(sys.version_info.major) str(sys.version_info.major)
+ '.' + '.'
......
...@@ -54,17 +54,17 @@ class FSTest1(FSTestBase): ...@@ -54,17 +54,17 @@ class FSTest1(FSTestBase):
s = """ s = """
java.io.IOException: Input/output error java.io.IOException: Input/output error
responseErrorMsg : failed to getFileStatus, errorCode: 3, path: /user/PUBLIC_KM_Data/wangxi16/data/serving_model, lparam: d868f6bb6822c621, errorMessage: inner error responseErrorMsg : failed to getFileStatus, errorCode: 3, path: /user/PUBLIC_KM_Data/wangxi16/data/serving_model, lparam: d868f6bb6822c621, errorMessage: inner error
at org.apache.hadoop.util.FileSystemUtil.throwException(FileSystemUtil.java:164) \tat org.apache.hadoop.util.FileSystemUtil.throwException(FileSystemUtil.java:164)
at org.apache.hadoop.util.FileSystemUtil.dealWithResponse(FileSystemUtil.java:118) \tat org.apache.hadoop.util.FileSystemUtil.dealWithResponse(FileSystemUtil.java:118)
at org.apache.hadoop.lite.client.LiteClientImpl.getFileStatus(LiteClientImpl.java:696) \tat org.apache.hadoop.lite.client.LiteClientImpl.getFileStatus(LiteClientImpl.java:696)
at org.apache.hadoop.fs.LibDFileSystemImpl.getFileStatus(LibDFileSystemImpl.java:297) \tat org.apache.hadoop.fs.LibDFileSystemImpl.getFileStatus(LibDFileSystemImpl.java:297)
at org.apache.hadoop.fs.LiteFileSystem.getFileStatus(LiteFileSystem.java:514) \tat org.apache.hadoop.fs.LiteFileSystem.getFileStatus(LiteFileSystem.java:514)
at org.apache.hadoop.fs.FsShell.test(FsShell.java:1092) \tat org.apache.hadoop.fs.FsShell.test(FsShell.java:1092)
at org.apache.hadoop.fs.FsShell.run(FsShell.java:2285) \tat org.apache.hadoop.fs.FsShell.run(FsShell.java:2285)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65) \tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79) \tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
at org.apache.hadoop.fs.FsShell.main(FsShell.java:2353) \tat org.apache.hadoop.fs.FsShell.main(FsShell.java:2353)
""" # fmt: off, avoid remove tabs in string """
print("split lines:", s.splitlines()) print("split lines:", s.splitlines())
self.assertIsNotNone(fs._test_match(s.splitlines())) self.assertIsNotNone(fs._test_match(s.splitlines()))
......
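The hunk above appears to be the counterpart to dropping the test_hdfs1.py entries from the pre-commit "Tabs remover" exclusion and from the flake8 per-file-ignores (E101/W191) earlier in this commit: the stack-trace sample still needs a TAB before every "at ..." frame, but spelling it as the \t escape keeps literal TAB bytes out of the source file, so no hook or lint exemption is required. A one-line sketch of the idea:

line = "\tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)"
assert line.startswith(chr(9))  # the string still begins with a real TAB character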
...@@ -218,12 +218,11 @@ class param(_param): ...@@ -218,12 +218,11 @@ class param(_param):
if "after * must be" not in str(e): if "after * must be" not in str(e):
raise raise
raise TypeError( raise TypeError(
"Parameters must be tuples, but %r is not (hint: use '(%r, )')" f"Parameters must be tuples, but {args!r} is not (hint: use '({args!r}, )')",
% (args, args),
) )
def __repr__(self): def __repr__(self):
return "param(*%r, **%r)" % self return "param(*{!r}, **{!r})".format(*self)
def to_safe_name(s): def to_safe_name(s):
......
...@@ -97,7 +97,7 @@ class TestStaticFunctionInstance(unittest.TestCase): ...@@ -97,7 +97,7 @@ class TestStaticFunctionInstance(unittest.TestCase):
self.assertNotEqual(net_1.forward, net_2.forward) self.assertNotEqual(net_1.forward, net_2.forward)
# convert layer into static progam of net_1 # convert layer into static progam of net_1
net_1.forward.concrete_program net_1.forward.concrete_program # noqa: B018
self.assertTrue(len(net_1.forward.program_cache) == 1) self.assertTrue(len(net_1.forward.program_cache) == 1)
# check no conversion applid with net_2 # check no conversion applid with net_2
self.assertTrue(len(net_2.forward.program_cache) == 0) self.assertTrue(len(net_2.forward.program_cache) == 0)
...@@ -317,7 +317,7 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase): ...@@ -317,7 +317,7 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase):
# raise error # raise error
foo_3 = paddle.jit.to_static(foo_func) foo_3 = paddle.jit.to_static(foo_func)
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
foo_3.concrete_program foo_3.concrete_program # noqa: B018
class TestInputDefaultName(unittest.TestCase): class TestInputDefaultName(unittest.TestCase):
...@@ -397,17 +397,17 @@ class TestErrorWithInitFromStaticMode(unittest.TestCase): ...@@ -397,17 +397,17 @@ class TestErrorWithInitFromStaticMode(unittest.TestCase):
with self.assertRaisesRegex( with self.assertRaisesRegex(
RuntimeError, "only available in dynamic mode" RuntimeError, "only available in dynamic mode"
): ):
net.forward.concrete_program net.forward.concrete_program # noqa: B018
with self.assertRaisesRegex( with self.assertRaisesRegex(
RuntimeError, "only available in dynamic mode" RuntimeError, "only available in dynamic mode"
): ):
net.forward.inputs net.forward.inputs # noqa: B018
with self.assertRaisesRegex( with self.assertRaisesRegex(
RuntimeError, "only available in dynamic mode" RuntimeError, "only available in dynamic mode"
): ):
net.forward.outputs net.forward.outputs # noqa: B018
class CallNonForwardFuncNet(paddle.nn.Layer): class CallNonForwardFuncNet(paddle.nn.Layer):
......
...@@ -613,7 +613,7 @@ class TestFindStatiConvertVarShapeSuffixVar(unittest.TestCase): ...@@ -613,7 +613,7 @@ class TestFindStatiConvertVarShapeSuffixVar(unittest.TestCase):
x_spec = paddle.static.InputSpec(shape=[None, 10]) x_spec = paddle.static.InputSpec(shape=[None, 10])
func = paddle.jit.to_static(dyfunc_with_if_2, input_spec=[x_spec]) func = paddle.jit.to_static(dyfunc_with_if_2, input_spec=[x_spec])
# Call this function to trigger program translation. # Call this function to trigger program translation.
func.concrete_program func.concrete_program # noqa: B018
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -2228,9 +2228,8 @@ class OpTest(unittest.TestCase): ...@@ -2228,9 +2228,8 @@ class OpTest(unittest.TestCase):
atol=atol, atol=atol,
equal_nan=False, equal_nan=False,
err_msg=( err_msg=(
"Operator %s error, %s variable %s (shape: %s, dtype: %s) max gradient diff over limit" "Operator {} error, {} variable {} (shape: {}, dtype: {}) max gradient diff over limit"
) ).format(
% (
self.op_type, self.op_type,
msg_prefix, msg_prefix,
name, name,
......
...@@ -92,7 +92,7 @@ def make_jacobian(x, y_size, np_dtype): ...@@ -92,7 +92,7 @@ def make_jacobian(x, y_size, np_dtype):
) )
return jacobians return jacobians
else: else:
None pass
def _compute_numerical_jacobian(program, x, y, place, scope, delta): def _compute_numerical_jacobian(program, x, y, place, scope, delta):
...@@ -321,10 +321,11 @@ def grad_check( ...@@ -321,10 +321,11 @@ def grad_check(
n = numerical[x_idx][y_idx] n = numerical[x_idx][y_idx]
if not np.allclose(a, n, rtol, atol): if not np.allclose(a, n, rtol, atol):
msg = ( msg = (
'Jacobian mismatch for output %s ' 'Jacobian mismatch for output {} '
'with respect to input %s on %s,\n' 'with respect to input {} on {},\n'
'numerical:%s\nanalytical:%s\n' 'numerical:{}\nanalytical:{}\n'.format(
% (y[y_idx].name, x[x_idx].name, str(place), n, a) y[y_idx].name, x[x_idx].name, str(place), n, a
)
) )
return fail_test(msg) return fail_test(msg)
return True return True
......
...@@ -620,9 +620,8 @@ class PrimForwardChecker: ...@@ -620,9 +620,8 @@ class PrimForwardChecker:
# check static forward # check static forward
if len(ret) != len(self.eager_desire): if len(ret) != len(self.eager_desire):
msg = ( msg = (
"The static comp forward api out tensor nums is different with eager forward api out tensor nums on %s." "The static comp forward api out tensor nums is different with eager forward api out tensor nums on {}."
'when enable_fw_comp is %s, static comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n' 'when enable_fw_comp is {}, static comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_fw_comp, self.enable_fw_comp,
len(ret), len(ret),
...@@ -699,9 +698,8 @@ class PrimForwardChecker: ...@@ -699,9 +698,8 @@ class PrimForwardChecker:
# check jit comp forward # check jit comp forward
if len(ret) != len(self.eager_desire): if len(ret) != len(self.eager_desire):
msg = ( msg = (
"The jit comp forward api out tensor nums is different with eager forward api out tensor nums on %s." "The jit comp forward api out tensor nums is different with eager forward api out tensor nums on {}."
'when enable_fw_comp is %s, jit comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n' 'when enable_fw_comp is {}, jit comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_fw_comp, self.enable_fw_comp,
len(ret), len(ret),
...@@ -795,9 +793,8 @@ class PrimForwardChecker: ...@@ -795,9 +793,8 @@ class PrimForwardChecker:
# check jit comp forward # check jit comp forward
if len(ret) != len(self.eager_desire): if len(ret) != len(self.eager_desire):
msg = ( msg = (
"The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on %s." "The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on {}."
'when enable_fw_comp is %s, enable_cinn is %s, jit comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n' 'when enable_fw_comp is {}, enable_cinn is {}, jit comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_fw_comp, self.enable_fw_comp,
core.is_compiled_with_cinn() and self.enable_cinn, core.is_compiled_with_cinn() and self.enable_cinn,
...@@ -869,8 +866,8 @@ class PrimGradChecker(PrimForwardChecker): ...@@ -869,8 +866,8 @@ class PrimGradChecker(PrimForwardChecker):
def get_output_dict(self, np_outputs, api_outputs, outputs_sig): def get_output_dict(self, np_outputs, api_outputs, outputs_sig):
assert len(api_outputs) <= len(outputs_sig), ( assert len(api_outputs) <= len(outputs_sig), (
"forward api outputs length must be the less than or equal to KernelSignature outputs,but receive %s and %s" "forward api outputs length must be the less than or equal to KernelSignature outputs,but receive {} and {}"
) % (len(api_outputs), len(outputs_sig)) ).format(len(api_outputs), len(outputs_sig))
output_dict = {} output_dict = {}
for i in range(len(api_outputs)): for i in range(len(api_outputs)):
output_name = outputs_sig[i] output_name = outputs_sig[i]
...@@ -992,9 +989,8 @@ class PrimGradChecker(PrimForwardChecker): ...@@ -992,9 +989,8 @@ class PrimGradChecker(PrimForwardChecker):
# check static forward # check static forward
if len(actual_ret) != len(self.eager_desire): if len(actual_ret) != len(self.eager_desire):
msg = ( msg = (
"The eager comp grad out tensor nums is different with eager grad out tensor nums on %s." "The eager comp grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_rev_comp is %s, eager comp grad api out tensor nums = %s, eager grad out tensor nums = %s. \n' 'when enable_rev_comp is {}, eager comp grad api out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_rev_comp, self.enable_rev_comp,
len(actual_ret), len(actual_ret),
...@@ -1098,9 +1094,8 @@ class PrimGradChecker(PrimForwardChecker): ...@@ -1098,9 +1094,8 @@ class PrimGradChecker(PrimForwardChecker):
# check static grad out # check static grad out
if len(actual_ret) != len(self.eager_desire): if len(actual_ret) != len(self.eager_desire):
msg = ( msg = (
"The static comp grad out tensor nums is different with eager grad out tensor nums on %s." "The static comp grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_fw_comp is %s,enable_rev_comp is %s, static comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n' 'when enable_fw_comp is {},enable_rev_comp is {}, static comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_fw_comp, self.enable_fw_comp,
self.enable_rev_comp, self.enable_rev_comp,
...@@ -1215,9 +1210,8 @@ class PrimGradChecker(PrimForwardChecker): ...@@ -1215,9 +1210,8 @@ class PrimGradChecker(PrimForwardChecker):
# check jit comp grad out # check jit comp grad out
if len(ret) != len(self.eager_desire): if len(ret) != len(self.eager_desire):
msg = ( msg = (
"The jit comp grad out tensor nums is different with eager grad out tensor nums on %s." "The jit comp grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_fw_comp is %s, enable_rev_comp is %s, jit comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n' 'when enable_fw_comp is {}, enable_rev_comp is {}, jit comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_fw_comp, self.enable_fw_comp,
self.enable_rev_comp, self.enable_rev_comp,
...@@ -1346,9 +1340,8 @@ class PrimGradChecker(PrimForwardChecker): ...@@ -1346,9 +1340,8 @@ class PrimGradChecker(PrimForwardChecker):
# check jit comp grad out # check jit comp grad out
if len(ret) != len(self.eager_desire): if len(ret) != len(self.eager_desire):
msg = ( msg = (
"The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on %s." "The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_fw_comp is %s, enable_rev_comp is %s, enable_cinn is %s, jit comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n' 'when enable_fw_comp is {}, enable_rev_comp is {}, enable_cinn is {}, jit comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
% (
str(self.place), str(self.place),
self.enable_fw_comp, self.enable_fw_comp,
self.enable_rev_comp, self.enable_rev_comp,
......
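All of the hunks above apply the same mechanical rewrite, presumably driven by ruff's pyupgrade rules (UP031/UP032): printf-style "%" formatting becomes str.format() when the message spans several implicitly concatenated literals, or an f-string when a single literal suffices. A minimal sketch of the pattern, with illustrative names (place, expected, actual are not taken from the Paddle sources):

place = "CPUPlace"
expected, actual = 3, 2

# before: printf-style "%" formatting over two implicitly concatenated literals
msg_old = (
    "out tensor nums differ on %s. "
    "expected = %s, actual = %s. \n" % (place, expected, actual)
)

# after: str.format() keeps the multi-line literal intact
msg_new = (
    "out tensor nums differ on {}. "
    "expected = {}, actual = {}. \n".format(place, expected, actual)
)

assert msg_old == msg_new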
...@@ -171,7 +171,6 @@ class TestBilateralSliceOp(OpTest): ...@@ -171,7 +171,6 @@ class TestBilateralSliceOp(OpTest):
def test_check_output(self): def test_check_output(self):
place = paddle.fluid.CUDAPlace(0) place = paddle.fluid.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5) self.check_output_with_place(place, atol=1e-5)
self.check_output
def test_check_grad(self): def test_check_grad(self):
place = paddle.fluid.CUDAPlace(0) place = paddle.fluid.CUDAPlace(0)
......
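The line deleted here referenced self.check_output without calling it, so it had no effect; removing it is behaviour-preserving (this is the kind of statement flake8-bugbear's B018 "useless expression" check flags). A stand-in sketch with a dummy class rather than the real OpTest:

class Demo:
    def check_output(self):
        return "checked"

    def test_check_output(self):
        self.check_output           # useless expression: the method is never invoked
        return self.check_output()  # the actual call


assert Demo().test_check_output() == "checked"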
...@@ -56,7 +56,7 @@ class TestCallbacks(unittest.TestCase): ...@@ -56,7 +56,7 @@ class TestCallbacks(unittest.TestCase):
def test_earlystopping(self): def test_earlystopping(self):
paddle.seed(2020) paddle.seed(2020)
for dynamic in [True, False]: for dynamic in [True, False]:
paddle.enable_static if not dynamic else None paddle.enable_static() if not dynamic else None
device = paddle.set_device('cpu') device = paddle.set_device('cpu')
sample_num = 100 sample_num = 100
train_dataset = MnistDataset(mode='train', sample_num=sample_num) train_dataset = MnistDataset(mode='train', sample_num=sample_num)
......
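Here the fix adds the missing call parentheses: the old conditional expression only selected the paddle.enable_static function object and discarded it, so static mode was never switched on. A stand-in sketch (no Paddle import; enable_static below is a dummy) of the difference:

calls = []


def enable_static():
    calls.append("static")


dynamic = False

enable_static if not dynamic else None    # old form: selects the function, no call
assert calls == []

enable_static() if not dynamic else None  # fixed form: the function actually runs
assert calls == ["static"]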
...@@ -1337,7 +1337,6 @@ class TestDistBase(unittest.TestCase): ...@@ -1337,7 +1337,6 @@ class TestDistBase(unittest.TestCase):
"PADDLE_TRAINER_ID": f"{trainer_id}", "PADDLE_TRAINER_ID": f"{trainer_id}",
"PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints, "PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
"PADDLE_CURRENT_ENDPOINT": ep, "PADDLE_CURRENT_ENDPOINT": ep,
"PADDLE_CURRENT_ENDPOINT": ep,
"PADDLE_DISTRI_BACKEND": "gloo", "PADDLE_DISTRI_BACKEND": "gloo",
"GLOG_v": "2", "GLOG_v": "2",
} }
......
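The removed line was a repeated "PADDLE_CURRENT_ENDPOINT" key; in a dict literal the later value silently overwrites the earlier one, so dropping the duplicate changes nothing at runtime. A short illustration (ep is a placeholder endpoint):

ep = "127.0.0.1:6170"  # placeholder endpoint

env_with_duplicate = {
    "PADDLE_CURRENT_ENDPOINT": ep,
    "PADDLE_CURRENT_ENDPOINT": ep,  # silently overwrites the entry above
}
env_deduplicated = {
    "PADDLE_CURRENT_ENDPOINT": ep,
}

assert env_with_duplicate == env_deduplicated
assert len(env_with_duplicate) == 1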
...@@ -90,10 +90,8 @@ class TestDownload(unittest.TestCase): ...@@ -90,10 +90,8 @@ class TestDownload(unittest.TestCase):
uncompressed_path = get_path_from_url(url, root_dir='./test_tar') uncompressed_path = get_path_from_url(url, root_dir='./test_tar')
self.assertTrue( self.assertTrue(
all( all(
[ os.path.exists(os.path.join("./test_tar", filepath))
os.path.exists(os.path.join("./test_tar", filepath)) for filepath in uncompressd_res
for filepath in uncompressd_res
]
) )
) )
...@@ -106,10 +104,8 @@ class TestDownload(unittest.TestCase): ...@@ -106,10 +104,8 @@ class TestDownload(unittest.TestCase):
uncompressed_path = get_path_from_url(url, root_dir='./test_zip') uncompressed_path = get_path_from_url(url, root_dir='./test_zip')
self.assertTrue( self.assertTrue(
all( all(
[ os.path.exists(os.path.join("./test_zip", filepath))
os.path.exists(os.path.join("./test_zip", filepath)) for filepath in uncompressd_res
for filepath in uncompressd_res
]
) )
) )
......
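Both hunks above replace a list comprehension passed to all() with a bare generator expression (the flake8-comprehensions C419 pattern, as far as I can tell): the result is identical, but the generator lets all() short-circuit and skips building a temporary list. A sketch with a hypothetical file list standing in for uncompressd_res:

import os

filepaths = ["a.txt", "b.txt"]  # hypothetical stand-in for uncompressd_res

as_list = all(
    [os.path.exists(os.path.join("./test_tar", fp)) for fp in filepaths]
)
as_generator = all(
    os.path.exists(os.path.join("./test_tar", fp)) for fp in filepaths
)

# Both forms agree; the generator avoids the temporary list and can stop
# at the first missing file.
assert as_list == as_generator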
...@@ -103,9 +103,8 @@ class TestFeedData(unittest.TestCase): ...@@ -103,9 +103,8 @@ class TestFeedData(unittest.TestCase):
self._test_feed_data_shape_mismatch(use_cuda) self._test_feed_data_shape_mismatch(use_cuda)
self.assertEqual( self.assertEqual(
str(shape_mismatch_err.exception), str(shape_mismatch_err.exception),
"The fed Variable %r should have dimensions = %r, " "The fed Variable {!r} should have dimensions = {!r}, "
"shape = %r, but received fed shape %r on each device" "shape = {!r}, but received fed shape {!r} on each device".format(
% (
'data', 'data',
len(in_shape_tuple), len(in_shape_tuple),
in_shape_tuple, in_shape_tuple,
...@@ -117,8 +116,8 @@ class TestFeedData(unittest.TestCase): ...@@ -117,8 +116,8 @@ class TestFeedData(unittest.TestCase):
self._test_feed_data_dtype_mismatch(use_cuda) self._test_feed_data_dtype_mismatch(use_cuda)
self.assertEqual( self.assertEqual(
str(dtype_mismatch_err.exception), str(dtype_mismatch_err.exception),
"The data type of fed Variable %r must be 'int64', but " "The data type of fed Variable {!r} must be 'int64', but "
"received 'float64'" % ('label'), "received 'float64'".format('label'),
) )
def _test_feed_data_dtype_mismatch(self, use_cuda): def _test_feed_data_dtype_mismatch(self, use_cuda):
......
...@@ -93,9 +93,6 @@ class TestFullOpError(unittest.TestCase): ...@@ -93,9 +93,6 @@ class TestFullOpError(unittest.TestCase):
) )
output = paddle.full_like(input_data, 2.0) output = paddle.full_like(input_data, 2.0)
def test_input_dtype():
paddle.full_like
self.assertRaises( self.assertRaises(
TypeError, TypeError,
paddle.full_like, paddle.full_like,
......
...@@ -476,8 +476,7 @@ class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip): ...@@ -476,8 +476,7 @@ class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip):
b = global_norm_clip b = global_norm_clip
self.assertTrue( self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but received:%f" f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}",
% (a, b),
) )
...@@ -505,8 +504,7 @@ class TestDygraphGradientClipByNorm(TestDygraphGradientClip): ...@@ -505,8 +504,7 @@ class TestDygraphGradientClipByNorm(TestDygraphGradientClip):
b = np.sqrt(np.sum(np.power(v, 2))) b = np.sqrt(np.sum(np.power(v, 2)))
self.assertTrue( self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by norm has wrong results, expetcd:%f, but received:%f" f"gradient clip by norm has wrong results, expetcd:{a:f}, but received:{b:f}",
% (a, b),
) )
...@@ -602,8 +600,7 @@ class TestDygraphGradientClipFP16(unittest.TestCase): ...@@ -602,8 +600,7 @@ class TestDygraphGradientClipFP16(unittest.TestCase):
b = global_norm_clip b = global_norm_clip
self.assertTrue( self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8), np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but received:%f" f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}",
% (a, b),
) )
...@@ -647,8 +644,7 @@ class TestDygraphGradientClipFP64(unittest.TestCase): ...@@ -647,8 +644,7 @@ class TestDygraphGradientClipFP64(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8), np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but received:%f" f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}",
% (a, b),
) )
......
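These assertions keep the printf %f conversion by moving it into the f-string format spec :f, so the rendered message is unchanged. A quick equivalence check with arbitrary sample values (the wording below is illustrative, not the test's exact message):

a, b = 0.125, 0.25  # arbitrary sample values

old = "expected:%f, but received:%f" % (a, b)
new = f"expected:{a:f}, but received:{b:f}"

assert old == new  # '%f' and ':f' both render six decimal places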
...@@ -23,8 +23,7 @@ from paddle import fluid ...@@ -23,8 +23,7 @@ from paddle import fluid
class VersionTest(unittest.TestCase): class VersionTest(unittest.TestCase):
def test_check_output(self): def test_check_output(self):
warnings.warn( warnings.warn(
"paddle.__version__: %s, fluid_version.full_version: %s, fluid_version.major: %s, fluid_version.minor: %s, fluid_version.patch: %s, fluid_version.rc: %s." "paddle.__version__: {}, fluid_version.full_version: {}, fluid_version.major: {}, fluid_version.minor: {}, fluid_version.patch: {}, fluid_version.rc: {}.".format(
% (
paddle.__version__, paddle.__version__,
fluid_version.full_version, fluid_version.full_version,
fluid_version.major, fluid_version.major,
......
...@@ -187,8 +187,7 @@ class TestCooSoftmax(unittest.TestCase): ...@@ -187,8 +187,7 @@ class TestCooSoftmax(unittest.TestCase):
) )
else: else:
print( print(
"`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`" f"`dim(={dim})` must be smaller than `sparse_dim(={sparse_dim}) + dense_dim(={dense_dim})`"
% (dim, sparse_dim, dense_dim)
) )
def check_run(self, dense_shape): def check_run(self, dense_shape):
......
...@@ -162,7 +162,6 @@ def create_bf16_test_class(parent): ...@@ -162,7 +162,6 @@ def create_bf16_test_class(parent):
dout[:, i], dout[:, i],
dout[:, i] * self.alpha[i], dout[:, i] * self.alpha[i],
) )
self.dx
elif self.mode == "element": elif self.mode == "element":
self.dx = np.where(self.x[:] > 0, dout[:], dout[:] * self.alpha) self.dx = np.where(self.x[:] > 0, dout[:], dout[:] * self.alpha)
......
...@@ -253,8 +253,7 @@ class TestImperativePTQ(unittest.TestCase): ...@@ -253,8 +253,7 @@ class TestImperativePTQ(unittest.TestCase):
self.assertTrue( self.assertTrue(
after_acc_top1 >= self.eval_acc_top1, after_acc_top1 >= self.eval_acc_top1,
msg="The test acc {%f} is less than {%f}." msg=f"The test acc {{{after_acc_top1:f}}} is less than {{{self.eval_acc_top1:f}}}.",
% (after_acc_top1, self.eval_acc_top1),
) )
self.assertTrue( self.assertTrue(
infer_acc_top1 >= after_acc_top1, infer_acc_top1 >= after_acc_top1,
...@@ -322,8 +321,7 @@ class TestImperativePTQfuse(TestImperativePTQ): ...@@ -322,8 +321,7 @@ class TestImperativePTQfuse(TestImperativePTQ):
# The acc of quantized model should be higher than 0.95. # The acc of quantized model should be higher than 0.95.
self.assertTrue( self.assertTrue(
after_acc_top1 >= self.eval_acc_top1, after_acc_top1 >= self.eval_acc_top1,
msg="The test acc {%f} is less than {%f}." msg=f"The test acc {{{after_acc_top1:f}}} is less than {{{self.eval_acc_top1:f}}}.",
% (after_acc_top1, self.eval_acc_top1),
) )
# Check the saved infer_model.The acc of infer model # Check the saved infer_model.The acc of infer model
# should not be lower than the one of dygraph model. # should not be lower than the one of dygraph model.
......
...@@ -220,13 +220,11 @@ class TestImperativeQatAmp(unittest.TestCase): ...@@ -220,13 +220,11 @@ class TestImperativeQatAmp(unittest.TestCase):
) )
_logger.info( _logger.info(
'fp32_acc_top1: %f, int8_acc_top1: %f' f'fp32_acc_top1: {fp32_acc_top1:f}, int8_acc_top1: {int8_acc_top1:f}'
% (fp32_acc_top1, int8_acc_top1)
) )
self.assertTrue( self.assertTrue(
int8_acc_top1 > fp32_acc_top1 - 0.01, int8_acc_top1 > fp32_acc_top1 - 0.01,
msg='fp32_acc_top1: %f, int8_acc_top1: %f' msg=f'fp32_acc_top1: {fp32_acc_top1:f}, int8_acc_top1: {int8_acc_top1:f}',
% (fp32_acc_top1, int8_acc_top1),
) )
input_spec = [ input_spec = [
......
...@@ -112,8 +112,7 @@ class FileReader: ...@@ -112,8 +112,7 @@ class FileReader:
if not isinstance(self._args[key], type): if not isinstance(self._args[key], type):
raise TypeError( raise TypeError(
"Invalid type of key [%s] in args dict, it should be a %s!" f"Invalid type of key [{key}] in args dict, it should be a {type}!"
% (key, type)
) )
exec(f"self._{key} = self._args[\"{key}\"]") exec(f"self._{key} = self._args[\"{key}\"]")
...@@ -206,8 +205,9 @@ class FileReader: ...@@ -206,8 +205,9 @@ class FileReader:
) )
else: else:
self._logger.info( self._logger.info(
"file list in dir [%s] is : %s !" "file list in dir [{}] is : {} !".format(
% (self._dataPath, ', '.join(self._fileList)) self._dataPath, ', '.join(self._fileList)
)
) )
return self._fileList return self._fileList
......
...@@ -63,8 +63,7 @@ class netFileReader(FileReader): ...@@ -63,8 +63,7 @@ class netFileReader(FileReader):
except Exception: except Exception:
self._logger.warning( self._logger.warning(
"invalid record [%s] in [%s]. skip it!" f"invalid record [{line[:-1]}] in [{fileName}]. skip it!"
% (line[:-1], fileName)
) )
traceInfo["traceEvents"] = traceEventList traceInfo["traceEvents"] = traceEventList
......
...@@ -25,12 +25,10 @@ def strToSecond(strTime): ...@@ -25,12 +25,10 @@ def strToSecond(strTime):
def getUsefulBuildTimeFile(filename): def getUsefulBuildTimeFile(filename):
os.system( os.system(
"grep -Po -- '-o .*' %s | grep ' elapsed' | grep -P -v '0:00.* elapse' > %s/tools/analysis_build_time" f"grep -Po -- '-o .*' {filename} | grep ' elapsed' | grep -P -v '0:00.* elapse' > {root_path}/tools/analysis_build_time"
% (filename, root_path)
) )
os.system( os.system(
"grep -v -- '-o .*' %s |grep ' elapse' | grep -P -v '0:00.* elapse' >> %s/tools/analysis_build_time" f"grep -v -- '-o .*' {filename} |grep ' elapse' | grep -P -v '0:00.* elapse' >> {root_path}/tools/analysis_build_time"
% (filename, root_path)
) )
...@@ -48,22 +46,19 @@ def analysisBuildTime(): ...@@ -48,22 +46,19 @@ def analysisBuildTime():
buildTime = line.split(', ')[1].split('elapsed')[0].strip() buildTime = line.split(', ')[1].split('elapsed')[0].strip()
secondTime = strToSecond(buildTime) secondTime = strToSecond(buildTime)
os.system( os.system(
"echo %s, %s >> %s/tools/tempbuildTime.txt" f"echo {buildFile}, {secondTime} >> {root_path}/tools/tempbuildTime.txt"
% (buildFile, secondTime, root_path)
) )
else: else:
buildTime = line.split(', ')[1].split('elapsed')[0].strip() buildTime = line.split(', ')[1].split('elapsed')[0].strip()
secondTime = strToSecond(buildTime) secondTime = strToSecond(buildTime)
if secondTime > 30: if secondTime > 30:
os.system( os.system(
"echo %s, %s >> %s/tools/tempbuildTime.txt" f"echo {line}, {secondTime} >> {root_path}/tools/tempbuildTime.txt"
% (line, secondTime, root_path)
) )
except ValueError: except ValueError:
print(line) print(line)
os.system( os.system(
'sort -n -k 2 -r %s/tools/tempbuildTime.txt > %s/tools/buildTime.txt' f'sort -n -k 2 -r {root_path}/tools/tempbuildTime.txt > {root_path}/tools/buildTime.txt'
% (root_path, root_path)
) )
......
...@@ -83,12 +83,12 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result): ...@@ -83,12 +83,12 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result):
logging.info("------ OP: %s ------" % case_name) logging.info("------ OP: %s ------" % case_name)
logging.info( logging.info(
"GPU time change: %s (develop: %.7f -> PR: %.7f)" f"GPU time change: {gpu_time_diff_str} (develop: {develop_gpu_time:.7f} -> PR: {pr_gpu_time:.7f})"
% (gpu_time_diff_str, develop_gpu_time, pr_gpu_time)
) )
logging.info( logging.info(
"Total time change: %.5f%% (develop: %.7f -> PR: %.7f)" "Total time change: {:.5f}% (develop: {:.7f} -> PR: {:.7f})".format(
% (total_time_diff * 100, develop_total_time, pr_total_time) total_time_diff * 100, develop_total_time, pr_total_time
)
) )
logging.info("backward: %s" % pr_result.get("backward")) logging.info("backward: %s" % pr_result.get("backward"))
logging.info("parameters:") logging.info("parameters:")
......
...@@ -81,10 +81,7 @@ print_arguments() ...@@ -81,10 +81,7 @@ print_arguments()
# List the commits in mainline branch. # List the commits in mainline branch.
os.chdir(args.git_dir) os.chdir(args.git_dir)
ret = subprocess.check_output( ret = subprocess.check_output(
[ [f'git rev-list --first-parent {args.good_commit}...{args.bad_commit}'],
'git rev-list --first-parent %s...%s'
% (args.good_commit, args.bad_commit)
],
shell=True, shell=True,
) )
sys.stdout.write('commits found:\n%s\n' % ret) sys.stdout.write('commits found:\n%s\n' % ret)
...@@ -121,8 +118,9 @@ while True: ...@@ -121,8 +118,9 @@ while True:
# Link error can happen without complete clean up. # Link error can happen without complete clean up.
cmd = ( cmd = (
'rm -rf * && ' 'rm -rf * && '
'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s' 'cmake -DWITH_TESTING=ON {} >> {} && make -j{} >> {}'.format(
% (args.git_dir, args.log_file, args.build_parallel, args.log_file) args.git_dir, args.log_file, args.build_parallel, args.log_file
)
) )
sys.stdout.write('cmd: %s\n' % cmd) sys.stdout.write('cmd: %s\n' % cmd)
try: try:
......
...@@ -104,7 +104,7 @@ class PRChecker: ...@@ -104,7 +104,7 @@ class PRChecker:
def __urlretrieve(self, url, filename): def __urlretrieve(self, url, filename):
ix = 1 ix = 1
with_proxy = urllib.request.getproxies() with_proxy = urllib.request.getproxies()
without_proxy = {'http': '', 'http': ''} without_proxy = {'http': '', 'https': ''}
while ix < 6: while ix < 6:
if ix // 2 == 0: if ix // 2 == 0:
cur_proxy = urllib.request.ProxyHandler(without_proxy) cur_proxy = urllib.request.ProxyHandler(without_proxy)
......
...@@ -84,8 +84,7 @@ def analysisFNDAFile(rootPath, test): ...@@ -84,8 +84,7 @@ def analysisFNDAFile(rootPath, test):
notrelated_ut_map_file notrelated_ut_map_file
): ):
print( print(
"make %s and %s successfully" f"make {related_ut_map_file} and {related_ut_map_file} successfully"
% (related_ut_map_file, related_ut_map_file)
) )
else: else:
print(f"make {related_ut_map_file} and {related_ut_map_file} failed") print(f"make {related_ut_map_file} and {related_ut_map_file} failed")
...@@ -132,8 +131,7 @@ def analysisFNDAFile(rootPath, test): ...@@ -132,8 +131,7 @@ def analysisFNDAFile(rootPath, test):
clazz_filename not in related_file_list clazz_filename not in related_file_list
): # xx.pb.cc in RELATED xx.pb.h not in RELATED ): # xx.pb.cc in RELATED xx.pb.h not in RELATED
os.system( os.system(
'echo %s >> %s' f'echo {clazz_filename} >> {notrelated_ut_map_file}'
% (clazz_filename, notrelated_ut_map_file)
) )
f.close() f.close()
......
...@@ -34,8 +34,7 @@ def get_all_paddle_file(rootPath): ...@@ -34,8 +34,7 @@ def get_all_paddle_file(rootPath):
def get_all_uts(rootPath): def get_all_uts(rootPath):
all_uts_paddle = '%s/build/all_uts_paddle' % rootPath all_uts_paddle = '%s/build/all_uts_paddle' % rootPath
os.system( os.system(
r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s' fr'cd {rootPath}/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > {all_uts_paddle}'
% (rootPath, all_uts_paddle)
) )
......
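The rewrite above keeps the r prefix by switching to an fr'' literal: the backslash sequences meant for grep ([ \t]+, \w+$) stay literal while rootPath and the output path are still interpolated. A self-contained sketch (placeholder path, no shelling out):

rootPath = "/workspace/Paddle"  # placeholder path
all_uts_paddle = f"{rootPath}/build/all_uts_paddle"

cmd = (
    fr'cd {rootPath}/build && ctest -N -V | grep -Ei "Test[ \t]+#"'
    fr' | grep -oEi "\w+$" > {all_uts_paddle}'
)

assert r"[ \t]+" in cmd and r"\w+$" in cmd  # backslashes survive the raw prefix
assert rootPath in cmd                      # interpolation still happens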
...@@ -30,8 +30,7 @@ def group_case_for_parallel(rootPath): ...@@ -30,8 +30,7 @@ def group_case_for_parallel(rootPath):
'exclusive_card_tests_mem0', 'exclusive_card_tests_mem0',
]: ]:
os.system( os.system(
'cd %s/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/%s --no-check-certificate' f'cd {rootPath}/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/{filename} --no-check-certificate'
% (rootPath, filename)
) )
# get nightly tests # get nightly tests
......
...@@ -62,8 +62,7 @@ def insert_pile_to_h_file(rootPath): ...@@ -62,8 +62,7 @@ def insert_pile_to_h_file(rootPath):
os.system(f'echo "#define _PRECISE{func.upper()}_" >> {line}') os.system(f'echo "#define _PRECISE{func.upper()}_" >> {line}')
os.system('echo "\n#include <cstdio>\n" >> %s' % line) os.system('echo "\n#include <cstdio>\n" >> %s' % line)
os.system( os.system(
'echo "__attribute__((constructor)) static void calledFirst%s()\n{" >> %s' f'echo "__attribute__((constructor)) static void calledFirst{func}()\n{{" >> {line}'
% (func, line)
) )
os.system( os.system(
'echo \' fprintf(stderr,"precise test map fileeee: %%s\\\\n", __FILE__);\n}\' >> %s' 'echo \' fprintf(stderr,"precise test map fileeee: %%s\\\\n", __FILE__);\n}\' >> %s'
...@@ -118,8 +117,7 @@ def get_h_cu_file(file_path): ...@@ -118,8 +117,7 @@ def get_h_cu_file(file_path):
ut_path = f"{rootPath}/build/ut_map/{ut}" ut_path = f"{rootPath}/build/ut_map/{ut}"
if os.path.exists(ut_path): if os.path.exists(ut_path):
os.system( os.system(
"cat %s/%s | grep 'precise test map fileeee:'| uniq >> %s/build/ut_map/%s/related_%s.txt" f"cat {dir_path}/{filename} | grep 'precise test map fileeee:'| uniq >> {rootPath}/build/ut_map/{ut}/related_{ut}.txt"
% (dir_path, filename, rootPath, ut, ut)
) )
else: else:
print("%s has failed,no has direcotory" % ut) print("%s has failed,no has direcotory" % ut)
......
...@@ -130,10 +130,7 @@ def append_fluid_kernels(): ...@@ -130,10 +130,7 @@ def append_fluid_kernels():
new_content = content.replace(location_str, location_str + append_str) new_content = content.replace(location_str, location_str + append_str)
if new_content == content: if new_content == content:
print( print(f"ERROR: can not find \"{location_str}\" in file \"{file_name}\"")
"ERROR: can not find \"%s\" in file \"%s\""
% (location_str, file_name)
)
return False return False
with open(file_name, 'w', encoding='utf-8') as f: with open(file_name, 'w', encoding='utf-8') as f:
......
...@@ -264,11 +264,9 @@ def is_required_match(requirestr, cbtitle='not-specified'): ...@@ -264,11 +264,9 @@ def is_required_match(requirestr, cbtitle='not-specified'):
return None return None
if all( if all(
[ k in SAMPLE_CODE_TEST_CAPACITY
k in SAMPLE_CODE_TEST_CAPACITY for k in requires
for k in requires if k not in ['skip', 'skiptest']
if k not in ['skip', 'skiptest']
]
): ):
return True return True
......