Unverified commit 8f65f72e, authored by Nyakku Shigure, committed by GitHub

bump ruff to 0.0.272 and update config (#54449)

* bump ruff to 0.0.271 and update config

* exclude third_party

* bump ruff to 0.0.272

* refine config
Parent 4a77cf53
[flake8]
select = C,E,F,W
select = C,E,W
exclude =
./build,
# A trick to exclude fluid/ but keep fluid/tests/, see more at
# https://github.com/PaddlePaddle/Paddle/pull/46290#discussion_r976392010
./python/paddle/fluid/[!t]**,
./python/paddle/fluid/tra**,
# Exclude fluid directory
./python/paddle/fluid/**,
# Exclude third-party libraries
./third_party/**,
./python/paddle/utils/gast/**,
    # Temporarily ignore CINN files; they will be fixed later
./python/cinn/**,
./test/cinn/**,
ignore =
# Whitespace before ‘,’, ‘;’, or ‘:’, it is not compatible with black
E203,
......@@ -23,20 +25,8 @@ ignore =
E731,
# Do not use variables named ‘l’, ‘O’, or ‘I’
E741,
# `name` may be undefined, or defined from star imports: `module`
F405,
# Local variable name is assigned to but never used
F841,
# Line break before binary operator, it is not compatible with black
W503
per-file-ignores =
# These files need tabs for testing.
test/dygraph_to_static/test_error.py:E101,W191
python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191
# Ignore unused imports in __init__.py
__init__.py: F401
# Ignore undefined variables in CMake config and some dygraph_to_static tests
.cmake-format.py: F821
test/dygraph_to_static/test_loop.py: F821
test/dygraph_to_static/test_closure_analysis.py: F821
python/paddle/static/amp/decorator.py: F811
......@@ -4,7 +4,8 @@ exclude: |
patches/.+|
paddle/fluid/framework/fleet/heter_ps/cudf/.+|
paddle/fluid/distributed/ps/thirdparty/round_robin.h|
python/paddle/utils/gast/.+
python/paddle/utils/gast/.+|
third_party/.+
)$
repos:
# Common hooks
......@@ -32,12 +33,11 @@ repos:
name: Tabs remover (Python)
files: (.*\.(py|bzl)|BUILD|.*\.BUILD|WORKSPACE)$
args: [--whitespaces-count, '4']
# Exclude the fluid directory but keep the fluid/tests directory.
# Exclude the fluid directory.
# And exclude some unit test files that require tabs.
exclude: |
(?x)^(
python/paddle/fluid/(?!tests).+|
python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py|
python/paddle/fluid/.+|
test/dygraph_to_static/test_error.py
)$
- repo: local
......@@ -66,8 +66,8 @@ repos:
hooks:
- id: flake8
args: ["--config=.flake8"]
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.0.254
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.0.272
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix, --no-cache]
......
......@@ -493,10 +493,9 @@ class OperantsAPI(ForwardAPI):
)
first_input_type = " ".join(declare_args[0].split(" ")[:-1])
# NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
assert first_input_type == "const Tensor&", (
"Error! The first argument of Tensor Api %s must be Tensor, but received %s"
% (func_name, first_input_type)
)
assert (
first_input_type == "const Tensor&"
), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
for name in self.attrs['names']:
default_value = ''
if self.attrs['attr_info'][name][1] is not None:
......@@ -515,10 +514,9 @@ class OperantsAPI(ForwardAPI):
)
first_input_type = " ".join(define_args[0].split(" ")[:-1])
# NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
assert first_input_type == "const Tensor&", (
"Error! The first argument of Tensor Api %s must be Tensor, but received %s"
% (func_name, first_input_type)
)
assert (
first_input_type == "const Tensor&"
), f"Error! The first argument of Tensor Api {func_name} must be Tensor, but received {first_input_type}"
for name in self.attrs['names']:
define_args.append(self.attrs['attr_info'][name][0] + ' ' + name)
# remove first Tensor argument
......
[tool.black]
exclude = "build"
line-length = 80
skip-string-normalization = true
extend-exclude = '''
(
third_party/.+ # Exclude third_party directory
| build/.+ # Exclude build directory
)
'''
[tool.isort]
profile = "black"
line_length = 80
known_first_party = ["paddle"]
skip = ["build", "__init__.py"]
skip = ["build", "third_party", "__init__.py"]
extend_skip_glob = [
# These files do not need to be formatted,
# see .flake8 for more details
"python/paddle/fluid/[!t]**",
"python/paddle/fluid/tra**",
"python/paddle/fluid/**",
"python/paddle/utils/gast/**",
    # Temporarily ignore CINN files; they will be fixed later
"python/cinn/**",
"test/cinn/**",
]
[tool.ruff]
exclude = [
"./build",
"./python/paddle/fluid/[!t]**",
"./python/paddle/fluid/tra**",
"third_party",
"./python/paddle/fluid/**",
"./python/paddle/utils/gast/**",
    # Temporarily ignore CINN files; they will be fixed later
"python/cinn/**",
"test/cinn/**",
]
target-version = "py37"
select = [
# Pyflakes
"F401",
"F",
# Comprehensions
"C4",
......@@ -60,17 +70,9 @@ select = [
"B032",
# Pylint
"PLE",
"PLC0414",
"PLC3002",
"PLE0100",
"PLE0101",
"PLE0604",
"PLE0605",
"PLE1142",
"PLE1205",
"PLE1206",
"PLE1307",
"PLE2502",
"PLR0206",
"PLR0402",
]
......@@ -78,6 +80,10 @@ unfixable = [
"NPY001"
]
ignore = [
# `name` may be undefined, or defined from star imports: `module`
"F405",
# Local variable name is assigned to but never used
"F841",
    # It does not meet the "Explicit is better than implicit" rule
"UP015",
# It will cause the performance regression on python3.10
......@@ -87,9 +93,13 @@ ignore = [
[tool.ruff.per-file-ignores]
# Ignore unused imports in __init__.py
"__init__.py" = ["F401"]
# Ignore undefined variables in CMake config and some dygraph_to_static tests
".cmake-format.py" = ["F821"]
"test/dygraph_to_static/test_closure_analysis.py" = ["F821"]
"python/paddle/static/amp/decorator.py" = ["F821"]
# Ignore version check in setup.py
"setup.py" = ["UP036"]
# Ignore unnecessary comprehension in dy2st unittest test_loop
"test/dygraph_to_static/test_loop.py" = ["C416"]
"test/dygraph_to_static/test_loop.py" = ["C416", "F821"]
# Ignore unnecessary lambda in dy2st unittest test_lambda
"test/dygraph_to_static/test_lambda.py" = ["PLC3002"]
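The `C416` suppression kept for `test_loop.py` above refers to flake8-comprehensions' "unnecessary comprehension" check. A minimal sketch of what that rule flags (illustrative values only, not taken from the test):

```python
# C416 fires on a comprehension that merely copies an iterable; ruff's
# suggested fix is the equivalent list()/set() call, which test_loop
# intentionally avoids, hence the per-file ignore.
items = (1, 2, 3)
copied = [x for x in items]   # ruff would typically propose list(items) here
assert copied == list(items)
```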
......@@ -242,7 +242,7 @@ class ProgramHelper:
# NOTE(dev): Because @to_static is a lazy mechanism, we explicitly call this to trigger
# generating the Program IR immediately.
getattr(self.proxy_layer, func_name).concrete_program
getattr(self.proxy_layer, func_name).concrete_program # noqa: B018
self._build_startup_program()
......
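The `# noqa: B018` added above suppresses flake8-bugbear's "useless expression" warning for an attribute access that is evaluated purely for its side effect. A minimal sketch of that situation, with a hypothetical `LazyProgram` class standing in for the real proxy layer:

```python
# B018 flags a bare expression statement, but a property access may still
# execute code; the noqa marks the access as intentional.
class LazyProgram:
    @property
    def concrete_program(self):
        # hypothetical side effect: build and cache the program on first access
        if not hasattr(self, "_program"):
            self._program = object()  # stand-in for the generated Program IR
        return self._program


helper = LazyProgram()
helper.concrete_program  # noqa: B018 -- evaluated only for its side effect
assert hasattr(helper, "_program")
```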
......@@ -204,15 +204,12 @@ class HybridCommunicateGroup:
self._dp_degree,
)
)
debug_str += (
", mp_group: %s, sharding_group: %s, pp_group: %s, dp_group: %s, check/clip group: %s"
% (
self._mp_group,
self._sharding_group,
self._pp_group,
self._dp_group,
self._check_group,
)
debug_str += ", mp_group: {}, sharding_group: {}, pp_group: {}, dp_group: {}, check/clip group: {}".format(
self._mp_group,
self._sharding_group,
self._pp_group,
self._dp_group,
self._check_group,
)
logger.info(debug_str)
......
......@@ -583,7 +583,7 @@ class UtilBase:
global_block._remove_op(index)
# if fetch_list have lod tensor
return_numpy = all([v.lod_level == 0 for v in fetch_list])
return_numpy = all(v.lod_level == 0 for v in fetch_list)
# try dump fetch_targets
feed_tensors = []
......
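Hunks like the one above replace a list comprehension inside `all()`/`any()` with a plain generator expression, presumably flake8-comprehensions' C419 as implemented by ruff. A short sketch of the equivalence, using made-up values:

```python
# Both forms return the same result, but the generator expression lets all()
# short-circuit at the first falsy element instead of building a full list.
lod_levels = [0, 1, 0]

with_list = all([level == 0 for level in lod_levels])  # materializes a list first
with_gen = all(level == 0 for level in lod_levels)     # stops at the first mismatch

assert with_list == with_gen
```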
......@@ -366,8 +366,7 @@ class MultiSlotDataGenerator(DataGenerator):
)
if name != self._proto_info[index][0]:
raise ValueError(
"the field name of two given line are not match: require<%s>, get<%s>."
% (self._proto_info[index][0], name)
f"the field name of two given line are not match: require<{self._proto_info[index][0]}>, get<{name}>."
)
if output:
output += " "
......
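Many of the remaining hunks only change how an error message is built, moving from `%` formatting to `str.format()` or an f-string without altering the text. A hedged sketch of the three equivalent forms, using placeholder values:

```python
# Illustrative only: the same message rendered three ways, matching the kind
# of rewrite applied throughout this commit.
required, got = "click", "clicks"

old_style = "require<%s>, get<%s>." % (required, got)
format_call = "require<{}>, get<{}>.".format(required, got)
f_string = f"require<{required}>, get<{got}>."

assert old_style == format_call == f_string
```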
......@@ -307,8 +307,7 @@ class DatasetBase:
data_gen_len = len(user_parsed_line)
if var_len != data_gen_len:
raise ValueError(
"var length mismatch error: var_list = %s vs data_generator = %s"
% (var_len, data_gen_len)
f"var length mismatch error: var_list = {var_len} vs data_generator = {data_gen_len}"
)
for i, ele in enumerate(user_parsed_line):
......@@ -324,10 +323,11 @@ class DatasetBase:
isinstance(ele, float) for ele in ele[1]
):
raise TypeError(
"var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-float value, which is %s \n"
"var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-float value, which is {} \n"
"Please check if order of var_list and data_generator are aligned. \n"
"Please check if var's type in data_generator is correct."
% (ele[0], "float", ele[1])
"Please check if var's type in data_generator is correct.".format(
ele[0], "float", ele[1]
)
)
if (
......@@ -335,10 +335,11 @@ class DatasetBase:
or var_list[i].dtype == core.VarDesc.VarType.INT32
) and not all(isinstance(ele, int) for ele in ele[1]):
raise TypeError(
"var dtype mismatch error: var name = %s, var type in var_list = %s, while var in data_generator contains non-int value, which is %s \n"
"var dtype mismatch error: var name = {}, var type in var_list = {}, while var in data_generator contains non-int value, which is {} \n"
"Please check if order of var_list and data_generator are aligned. \n"
"Please check if var's type in data_generator is correct."
% (ele[0], "int", ele[1])
"Please check if var's type in data_generator is correct.".format(
ele[0], "int", ele[1]
)
)
else:
......
......@@ -695,8 +695,9 @@ def get_gpus(gpus):
for x in gpus.split(','):
assert x in cuda_visible_devices_list, (
"Can't find "
"your gpus %s in CUDA_VISIBLE_DEVICES[%s]."
% (x, cuda_visible_devices)
"your gpus {} in CUDA_VISIBLE_DEVICES[{}].".format(
x, cuda_visible_devices
)
)
res_gpus = [
cuda_visible_devices_list.index(x.strip())
......@@ -1485,10 +1486,9 @@ class ParameterServerLauncher:
else:
self.current_node_ip = pod_ip
if not self.distribute_mode == DistributeMode.PS_HETER:
assert self.current_node_ip in self.node_ips, (
"Can't find your local ip {%s} in args.servers and args.workers ips: {%s}"
% (self.current_node_ip, self.node_ips)
)
assert (
self.current_node_ip in self.node_ips
), f"Can't find your local ip {{{self.current_node_ip}}} in args.servers and args.workers ips: {{{self.node_ips}}}"
if self.current_node_ip in self.node_ips:
self.node_rank = self.node_ips.index(self.current_node_ip)
logger.debug(
......
......@@ -69,9 +69,8 @@ def initialize_p2p_groups(
) = _hcg.get_p2p_groups()
debug_str = (
"P2pInfo: send_next_group: %s, send_prev_group: %s, "
"recv_next_group: %s, recv_prev_group: %s"
% (
"P2pInfo: send_next_group: {}, send_prev_group: {}, "
"recv_next_group: {}, recv_prev_group: {}".format(
repr(send_next_group),
repr(send_prev_group),
repr(recv_next_group),
......
......@@ -79,10 +79,8 @@ class GroupShardedStage2(nn.Layer):
else sharding_optimizer
)
assert all(
[
isinstance(opt, GroupShardedOptimizerStage2)
for opt in self._sharding_optimizers
]
isinstance(opt, GroupShardedOptimizerStage2)
for opt in self._sharding_optimizers
), "Please use GroupShardedOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable
......
......@@ -124,7 +124,7 @@ class ParamStorage(InternalStorage):
"""
assert all(
[id(param) not in self._param_ids for param in trainable_params]
id(param) not in self._param_ids for param in trainable_params
), "The same param cannot be checked in twice"
assert self.buffer is not None
......
......@@ -154,16 +154,12 @@ def print_metric(metric_ptr, name):
"""
if name.find("wuauc") != -1:
metric = metric_ptr.get_wuauc_metric_msg(name)
monitor_msg = (
"%s: User Count=%.0f INS Count=%.0f UAUC=%.6f WUAUC=%.6f "
% (name, metric[0], metric[1], metric[4], metric[5])
)
monitor_msg = f"{name}: User Count={metric[0]:.0f} INS Count={metric[1]:.0f} UAUC={metric[4]:.6f} WUAUC={metric[5]:.6f} "
else:
metric = metric_ptr.get_metric_msg(name)
monitor_msg = (
"%s: AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f RMSE=%.6f "
"Actual CTR=%.6f Predicted CTR=%.6f COPC=%.6f INS Count=%.0f"
% (
"{}: AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} RMSE={:.6f} "
"Actual CTR={:.6f} Predicted CTR={:.6f} COPC={:.6f} INS Count={:.0f}".format(
name,
metric[0],
metric[1],
......
......@@ -425,8 +425,7 @@ class DataParallel(layers.Layer):
params_set.add(param)
if not isinstance(param, self.var_dtype):
raise TypeError(
"The data type of '%s' must be '%s'"
% (param.name, self.var_dtype)
f"The data type of '{param.name}' must be '{self.var_dtype}'"
)
if param.trainable:
layers_param.append((sublayer, param))
......
......@@ -55,7 +55,7 @@ class RecomputeState(ProgramStats):
return self._reserved_vars
def is_recompute(self):
return any([is_recompute_op(op) for op in self.ops])
return any(is_recompute_op(op) for op in self.ops)
def build_states(self):
for i, op in enumerate(self.ops):
......
......@@ -177,7 +177,7 @@ def find_all_fuse_all_reduce_groups(block):
if in_var.type != core.VarDesc.VarType.LOD_TENSOR:
return False
shape = in_var.shape
if any([s <= 0 for s in shape]):
if any(s <= 0 for s in shape):
return False
return True
......
......@@ -85,7 +85,7 @@ class PassBase(ABC):
def _check_conflict_including_common_rules(self, other_pass):
return self._check_conflict(other_pass) and all(
[r(other_pass, self) for r in PassBase._COMMON_RULES]
r(other_pass, self) for r in PassBase._COMMON_RULES
)
def apply(self, main_programs, startup_programs, context=None):
......@@ -96,10 +96,8 @@ class PassBase(ABC):
return context
if not all(
[
self._check_conflict_including_common_rules(p)
for p in context.passes
]
self._check_conflict_including_common_rules(p)
for p in context.passes
):
return context
......@@ -325,10 +323,8 @@ def _solve_pass_conflict(passes, context):
passes = []
for p in old_passes:
if all(
[
p._check_conflict_including_common_rules(applied_p)
for applied_p in context.passes
]
p._check_conflict_including_common_rules(applied_p)
for applied_p in context.passes
):
passes.append(p)
......
......@@ -211,9 +211,10 @@ def _get_subprocess_env_list(nprocs, options):
for card_id in selected_device_list:
if card_id not in env_devices_list:
raise ValueError(
"The selected gpu card %s cannot found in "
"CUDA_VISIBLE_DEVICES (%s)."
% (card_id, ",".join(env_devices_list))
"The selected gpu card {} cannot found in "
"CUDA_VISIBLE_DEVICES ({}).".format(
card_id, ",".join(env_devices_list)
)
)
elif options['backend'] == 'bkcl':
......@@ -251,9 +252,10 @@ def _get_subprocess_env_list(nprocs, options):
for card_id in selected_device_list:
if card_id not in env_devices_list:
raise ValueError(
"The selected xpu card %s cannot found in "
"XPU_VISIBLE_DEVICES (%s)."
% (card_id, ",".join(env_devices_list))
"The selected xpu card {} cannot found in "
"XPU_VISIBLE_DEVICES ({}).".format(
card_id, ",".join(env_devices_list)
)
)
elif options['backend'] == 'gloo':
# TODO check gpu / xpu flag must not exist
......
......@@ -82,8 +82,9 @@ def get_gpus(selected_gpus):
for x in selected_gpus.split(','):
assert x in cuda_visible_devices_list, (
"Can't find "
"your selected_gpus %s in CUDA_VISIBLE_DEVICES[%s]."
% (x, cuda_visible_devices)
"your selected_gpus {} in CUDA_VISIBLE_DEVICES[{}].".format(
x, cuda_visible_devices
)
)
gpus = [
cuda_visible_devices_list.index(x.strip())
......
......@@ -1894,7 +1894,7 @@ class Model:
assert train_data is not None, "train_data must be given!"
if isinstance(batch_size, (tuple, list)) and all(
[isinstance(x, int) for x in batch_size]
isinstance(x, int) for x in batch_size
):
assert (
len(batch_size) == 2
......
......@@ -1640,10 +1640,9 @@ class FleetUtil:
total_ins_num_name,
)
self.rank0_print(
"%s global AUC=%.6f BUCKET_ERROR=%.6f MAE=%.6f "
"RMSE=%.6f Actural_CTR=%.6f Predicted_CTR=%.6f "
"COPC=%.6f MEAN Q_VALUE=%.6f Ins number=%s"
% (
"{} global AUC={:.6f} BUCKET_ERROR={:.6f} MAE={:.6f} "
"RMSE={:.6f} Actural_CTR={:.6f} Predicted_CTR={:.6f} "
"COPC={:.6f} MEAN Q_VALUE={:.6f} Ins number={}".format(
print_prefix,
auc,
bucket_error,
......
......@@ -76,8 +76,7 @@ class DownpourServer(Server):
return
else:
raise ValueError(
"expect table %s type=%s, but actual type=%s"
% (table_id, pslib.PS_SPARSE_TABLE, table.type)
f"expect table {table_id} type={pslib.PS_SPARSE_TABLE}, but actual type={table.type}"
)
if strategy is None:
strategy = {}
......@@ -388,8 +387,7 @@ class DownpourServer(Server):
return
else:
raise ValueError(
"expect table %s type=%s, but actual type=%s"
% (table_id, pslib.PS_DENSE_TABLE, table.type)
f"expect table {table_id} type={pslib.PS_DENSE_TABLE}, but actual type={table.type}"
)
if strategy is None:
......@@ -480,8 +478,7 @@ class DownpourServer(Server):
return
else:
raise ValueError(
"expect table %s type=%s, but actual type=%s"
% (table_id, pslib.PS_DENSE_TABLE, table.type)
f"expect table {table_id} type={pslib.PS_DENSE_TABLE}, but actual type={table.type}"
)
if strategy is None:
strategy = {}
......
......@@ -277,8 +277,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
def _check_params_grads(self, params, grads):
if len(params) != len(grads):
raise ValueError(
"params size != grads size, %s vs %s"
% (len(params), len(grads))
f"params size != grads size, {len(params)} vs {len(grads)}"
)
pname2grad = {}
......@@ -353,8 +352,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
d_size[table_name] = emb_size
elif d_size[table_name] != emb_size:
raise ValueError(
"embedding size error: %s vs %s"
% (emb_size, d_size[table_name])
f"embedding size error: {emb_size} vs {d_size[table_name]}"
)
return d_size
......@@ -384,9 +382,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[table_name] - 3
):
raise ValueError(
"fleet config sparse_embedx_dim=%s not"
" equal to embedding dim - 3 = %s"
% (st["sparse_embedx_dim"], emb_to_size[table_name] - 3)
"fleet config sparse_embedx_dim={} not"
" equal to embedding dim - 3 = {}".format(
st["sparse_embedx_dim"], emb_to_size[table_name] - 3
)
)
if (
st.get("sparse_embedx_dim") is not None
......@@ -394,9 +393,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[table_name] - 1
):
raise ValueError(
"fleet config sparse_embedx_dim=%s not"
" equal to embedding dim - 1 = %s"
% (st["sparse_embedx_dim"], emb_to_size[table_name] - 1)
"fleet config sparse_embedx_dim={} not"
" equal to embedding dim - 1 = {}".format(
st["sparse_embedx_dim"], emb_to_size[table_name] - 1
)
)
if (
st.get("sparse_embedx_dim") is None
......@@ -432,9 +432,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[table_name]
):
raise ValueError(
"fleet config sparse_embedx_dim=%s not"
" equal to embedding dim = %s"
% (st["sparse_embedx_dim"], emb_to_size[table_name])
"fleet config sparse_embedx_dim={} not"
" equal to embedding dim = {}".format(
st["sparse_embedx_dim"], emb_to_size[table_name]
)
)
if st.get("sparse_embedx_dim") is None:
logger.warning(
......@@ -603,8 +604,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
else:
if len(ps_param.trainer_param) != len(prog_id_to_worker):
raise ValueError(
"trainer param size != program size, %s vs %s"
% (len(ps_param.trainer_param), len(prog_id_to_worker))
f"trainer param size != program size, {len(ps_param.trainer_param)} vs {len(prog_id_to_worker)}"
)
idx = 0
# prog_id_to_worker is OrderedDict
......@@ -682,9 +682,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[key] - 3
):
raise ValueError(
"fleet config sparse_embedx_dim=%s not"
" equal to embedding size - 3 = %s"
% (st["sparse_embedx_dim"], emb_to_size[key] - 3)
"fleet config sparse_embedx_dim={} not"
" equal to embedding size - 3 = {}".format(
st["sparse_embedx_dim"], emb_to_size[key] - 3
)
)
st["sparse_embedx_dim"] = emb_to_size[key] - 3
elif accessor == "DownpourSparseValueAccessor":
......@@ -693,9 +694,10 @@ class DistributedAdam(DistributedOptimizerImplBase):
and st["sparse_embedx_dim"] != emb_to_size[key]
):
raise ValueError(
"fleet config sparse_embedx_dim=%s not"
" equal to embedding size = %s"
% (st["sparse_embedx_dim"], emb_to_size[key])
"fleet config sparse_embedx_dim={} not"
" equal to embedding size = {}".format(
st["sparse_embedx_dim"], emb_to_size[key]
)
)
st["sparse_embedx_dim"] = emb_to_size[key]
......
......@@ -321,7 +321,7 @@ def try_load_model_vars(
global_block._remove_op(index)
# if fetch_list have lod tensor
return_numpy = all([v.lod_level == 0 for v in fetch_list])
return_numpy = all(v.lod_level == 0 for v in fetch_list)
# try dump fetch_targets
feed_tensors = []
......
......@@ -108,7 +108,7 @@ class PaddedSeqLenInfo(SeqLenInfo):
@classmethod
def from_seqlens_padded(cls, seqlens, padding):
assert all([seqlen <= padding for seqlen in seqlens])
assert all(seqlen <= padding for seqlen in seqlens)
seqstart_py = list(range(0, len(seqlens) * padding + 1, padding))
return cls(
seqlen=paddle.to_tensor(seqlens, dtype=paddle.int32),
......
......@@ -273,7 +273,7 @@ class TensorDataset(Dataset):
"TensorDataset con only be used in imperative mode"
)
assert all(
[tensor.shape[0] == tensors[0].shape[0] for tensor in tensors]
tensor.shape[0] == tensors[0].shape[0] for tensor in tensors
), "tensors not have same shape of the 1st dimension"
self.tensors = tensors
......
......@@ -599,7 +599,7 @@ def convert_shape(x):
"""
def has_negative(list_shape):
return any([x < 0 for x in list_shape])
return any(x < 0 for x in list_shape)
# When `x` is Variable:
# (1) if x.shape contains -1, such as [2, -1, 64], returns [2, var, 64],
......
......@@ -583,10 +583,8 @@ class PartialProgramLayer:
filter(
lambda x: x[0] >= start_idx
and any(
[
out_arg == var_grad_name
for out_arg in x[1].output_arg_names
]
out_arg == var_grad_name
for out_arg in x[1].output_arg_names
),
enumerate(target_program.block(0).ops),
)
......
......@@ -102,7 +102,7 @@ def _update_padding_nd(padding, channel_last, num_dims):
else:
padding_algorithm = "EXPLICIT"
padding = convert_to_list(padding, num_dims, 'padding')
if not all([p >= 0 for p in padding]):
if not all(p >= 0 for p in padding):
raise ValueError(
"Invalid padding, all value should be larger than or equal to 0, but received: {}".format(
padding
......
......@@ -749,7 +749,7 @@ def max_unpool1d(
    This API implements max unpooling 1d operation.
`max_unpool1d` accepts the output of `max_pool1d` as input,
including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero.
All non-maximum values are set to zero.
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
......@@ -1025,7 +1025,7 @@ def max_unpool3d(
    This API implements max unpooling 3d operation.
`max_unpool3d` accepts the output of `max_pool3d` as input,
including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero.
All non-maximum values are set to zero.
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
......
......@@ -1123,7 +1123,7 @@ class MaxUnPool1D(Layer):
`max_unpool1d` accepts the output of `max_pool1d` as input,
including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero.
All non-maximum values are set to zero.
- Input: :math:`(N, C, L_{in})`
- Output: :math:`(N, C, L_{out})`, where
......@@ -1207,7 +1207,7 @@ class MaxUnPool2D(Layer):
'max_unpool2d' accepts the output of 'max_unpool2d' as input
Including the indices of the maximum value and calculating the partial inverse
All non-maximum values ​​are set to zero.
All non-maximum values are set to zero.
Parameters:
......@@ -1295,7 +1295,7 @@ class MaxUnPool3D(Layer):
`max_unpool3d` accepts the output of `max_pool3d` as input,
including the indices of the maximum value and calculate the partial inverse.
All non-maximum values ​​are set to zero.
All non-maximum values are set to zero.
- Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C, D_{out}, H_{out}, W_{out})`, where
......
......@@ -1014,10 +1014,8 @@ class MultiStepDecay(LRScheduler):
)
if not all(
[
milestones[i] < milestones[i + 1]
for i in range(len(milestones) - 1)
]
milestones[i] < milestones[i + 1]
for i in range(len(milestones) - 1)
):
raise ValueError('The elements of milestones must be incremented')
if gamma >= 1.0:
......
......@@ -452,10 +452,8 @@ class DistributedSummary:
# case 2: TracerEventType is Operator but is communication op
elif hostnode.type == TracerEventType.Operator and any(
[
name in hostnode.name.lower()
for name in _CommunicationOpName
]
name in hostnode.name.lower()
for name in _CommunicationOpName
):
self.cpu_communication_range.append(
(hostnode.start_ns, hostnode.end_ns)
......
......@@ -804,7 +804,7 @@ def decorate(
@overload(key=FunctionType.COMMON)
def decorate(
def decorate( # noqa: F811
optimizer,
amp_lists=None,
level='O1',
......
......@@ -75,7 +75,7 @@ def _check_args(caller, args, supported_args=None, deprecated_args=None):
def _check_vars(name, var_list):
if not isinstance(var_list, list):
var_list = [var_list]
if not all([isinstance(var, Variable) for var in var_list]):
if not all(isinstance(var, Variable) for var in var_list):
raise ValueError(
f"'{name}' should be a Variable or a list of Variable."
)
......
......@@ -473,7 +473,7 @@ def data_norm(
Args:
input (Tensor): The input Tensor.
act (str, optional): Activation type, linear|relu|prelu|... Default: None.
epsilon(float, optional): Whether to add small values ​in​to the variance during calculations
epsilon(float, optional): Whether to add small values into the variance during calculations
to prevent division by zero. Default: 1e-05.
param_attr (ParamAttr, optional): The parameter attribute for Parameter `scale`. Default: None.
data_layout (str, optional): Specify the data format of the input, and the data format of the output
......@@ -1243,8 +1243,9 @@ def conv3d(
if num_channels % groups != 0:
raise ValueError(
"The number of input channels must be divisible by Attr(groups). "
"Received: number of channels(%s), groups(%s)."
% (str(num_channels), str(groups))
"Received: number of channels({}), groups({}).".format(
str(num_channels), str(groups)
)
)
num_filter_channels = num_channels // groups
......
......@@ -2108,11 +2108,9 @@ def assign(x, output=None):
if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
# We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types.
if not all(
[
x.shape == (1,)
for x in input
if isinstance(x, (Variable, core.eager.Tensor))
]
x.shape == (1,)
for x in input
if isinstance(x, (Variable, core.eager.Tensor))
):
raise TypeError(
"Unsupport paddle.assign([Variable, Variable...]) with non-scalar variable."
......
......@@ -78,8 +78,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if binary_op and x.dtype != y.dtype:
raise ValueError(
"(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
% (op_name, x.dtype, y.dtype)
f"(InvalidArgument) The DataType of {op_name} Op's Variable must be consistent, but received {x.dtype} and {y.dtype}."
)
if out is None:
......
......@@ -1851,8 +1851,7 @@ def stack(x, axis=0, name=None):
x = [x]
else:
raise TypeError(
"The type of '%s' in %s must be %s, but received %s"
% (
"The type of '{}' in {} must be {}, but received {}".format(
'x',
'stack',
'list[Tensor], tuple[Tensor] or TensorArray',
......
......@@ -956,8 +956,7 @@ def multiply(x, y, name=None):
else:
if x.dtype != y.dtype:
raise TypeError(
'Input tensors must be same type, but received type of x: %s, type of y: %s '
% (x.dtype, y.dtype)
f'Input tensors must be same type, but received type of x: {x.dtype}, type of y: {y.dtype} '
)
return _elementwise_op(LayerHelper('elementwise_mul', **locals()))
......@@ -1891,8 +1890,9 @@ def mm(input, mat2, name=None):
raise ValueError(
"After performing an optional transpose, Input X's width should be "
"equal to Y's width for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n"
% (x_shape, y_shape)
"prerequisites. But received X's shape: {}, Y's shape: {}\n".format(
x_shape, y_shape
)
)
if len(y_shape) > 2 and len(x_shape) > 2:
......@@ -2156,8 +2156,9 @@ def inner(x, y, name=None):
raise ValueError(
"After performing an optional transpose, Input X's last dim should be "
"equal to Y's last dim for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n"
% (x_shape, y_shape)
"prerequisites. But received X's shape: {}, Y's shape: {}\n".format(
x_shape, y_shape
)
)
__check_input(nx, ny)
......
......@@ -716,7 +716,7 @@ class BuildExtension(build_ext):
for i, extension in enumerate(self.extensions):
sources = [os.path.abspath(s) for s in extension.sources]
if not self.contain_cuda_file:
self.contain_cuda_file = any([is_cuda_file(s) for s in sources])
self.contain_cuda_file = any(is_cuda_file(s) for s in sources)
op_names = parse_op_name_from(sources)
for op_name in op_names:
......
......@@ -1267,7 +1267,7 @@ def _write_setup_file(
).lstrip()
with_cuda = False
if any([is_cuda_file(source) for source in sources]):
if any(is_cuda_file(source) for source in sources):
with_cuda = True
log_v(f"with_cuda: {with_cuda}", verbose)
......
......@@ -300,8 +300,9 @@ def _recursive_assert_same_structure(nest1, nest2, check_types):
if type_nest1 != type_nest2:
raise TypeError(
"The two structures don't have the same sequence type. First "
"structure has type %s, while second structure has type %s."
% (type_nest1, type_nest2)
"structure has type {}, while second structure has type {}.".format(
type_nest1, type_nest2
)
)
if isinstance(nest1, dict):
keys1 = set(nest1.keys())
......
......@@ -49,8 +49,7 @@ else:
sys.version_info.minor
):
raise RuntimeError(
"You set PY_VERSION=%s, but your current python environment is %s, you should keep them consistent!"
% (
"You set PY_VERSION={}, but your current python environment is {}, you should keep them consistent!".format(
os.getenv("PY_VERSION"),
str(sys.version_info.major)
+ '.'
......
......@@ -54,17 +54,17 @@ class FSTest1(FSTestBase):
s = """
java.io.IOException: Input/output error
responseErrorMsg : failed to getFileStatus, errorCode: 3, path: /user/PUBLIC_KM_Data/wangxi16/data/serving_model, lparam: d868f6bb6822c621, errorMessage: inner error
at org.apache.hadoop.util.FileSystemUtil.throwException(FileSystemUtil.java:164)
at org.apache.hadoop.util.FileSystemUtil.dealWithResponse(FileSystemUtil.java:118)
at org.apache.hadoop.lite.client.LiteClientImpl.getFileStatus(LiteClientImpl.java:696)
at org.apache.hadoop.fs.LibDFileSystemImpl.getFileStatus(LibDFileSystemImpl.java:297)
at org.apache.hadoop.fs.LiteFileSystem.getFileStatus(LiteFileSystem.java:514)
at org.apache.hadoop.fs.FsShell.test(FsShell.java:1092)
at org.apache.hadoop.fs.FsShell.run(FsShell.java:2285)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
at org.apache.hadoop.fs.FsShell.main(FsShell.java:2353)
""" # fmt: off, avoid remove tabs in string
\tat org.apache.hadoop.util.FileSystemUtil.throwException(FileSystemUtil.java:164)
\tat org.apache.hadoop.util.FileSystemUtil.dealWithResponse(FileSystemUtil.java:118)
\tat org.apache.hadoop.lite.client.LiteClientImpl.getFileStatus(LiteClientImpl.java:696)
\tat org.apache.hadoop.fs.LibDFileSystemImpl.getFileStatus(LibDFileSystemImpl.java:297)
\tat org.apache.hadoop.fs.LiteFileSystem.getFileStatus(LiteFileSystem.java:514)
\tat org.apache.hadoop.fs.FsShell.test(FsShell.java:1092)
\tat org.apache.hadoop.fs.FsShell.run(FsShell.java:2285)
\tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
\tat org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
\tat org.apache.hadoop.fs.FsShell.main(FsShell.java:2353)
"""
print("split lines:", s.splitlines())
self.assertIsNotNone(fs._test_match(s.splitlines()))
......
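In the hunk above, the literal tab characters inside the Java stack-trace string are rewritten as `\t` escapes, so the tab-remover hook no longer needs to skip this file and the `# fmt: off` note can go. A small sketch showing the escape produces the identical string:

```python
# chr(9) is the tab character; "\t" in a normal string literal is exactly
# that character, so the test string is byte-for-byte unchanged.
line = "\tat org.apache.hadoop.fs.FsShell.main(FsShell.java:2353)"
assert line == chr(9) + "at org.apache.hadoop.fs.FsShell.main(FsShell.java:2353)"
```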
......@@ -218,12 +218,11 @@ class param(_param):
if "after * must be" not in str(e):
raise
raise TypeError(
"Parameters must be tuples, but %r is not (hint: use '(%r, )')"
% (args, args),
f"Parameters must be tuples, but {args!r} is not (hint: use '({args!r}, )')",
)
def __repr__(self):
return "param(*%r, **%r)" % self
return "param(*{!r}, **{!r})".format(*self)
def to_safe_name(s):
......
......@@ -97,7 +97,7 @@ class TestStaticFunctionInstance(unittest.TestCase):
self.assertNotEqual(net_1.forward, net_2.forward)
# convert layer into static program of net_1
net_1.forward.concrete_program
net_1.forward.concrete_program # noqa: B018
self.assertTrue(len(net_1.forward.program_cache) == 1)
# check no conversion applied with net_2
self.assertTrue(len(net_2.forward.program_cache) == 0)
......@@ -317,7 +317,7 @@ class TestDifferentInputSpecCacheProgram(unittest.TestCase):
# raise error
foo_3 = paddle.jit.to_static(foo_func)
with self.assertRaises(ValueError):
foo_3.concrete_program
foo_3.concrete_program # noqa: B018
class TestInputDefaultName(unittest.TestCase):
......@@ -397,17 +397,17 @@ class TestErrorWithInitFromStaticMode(unittest.TestCase):
with self.assertRaisesRegex(
RuntimeError, "only available in dynamic mode"
):
net.forward.concrete_program
net.forward.concrete_program # noqa: B018
with self.assertRaisesRegex(
RuntimeError, "only available in dynamic mode"
):
net.forward.inputs
net.forward.inputs # noqa: B018
with self.assertRaisesRegex(
RuntimeError, "only available in dynamic mode"
):
net.forward.outputs
net.forward.outputs # noqa: B018
class CallNonForwardFuncNet(paddle.nn.Layer):
......
......@@ -613,7 +613,7 @@ class TestFindStatiConvertVarShapeSuffixVar(unittest.TestCase):
x_spec = paddle.static.InputSpec(shape=[None, 10])
func = paddle.jit.to_static(dyfunc_with_if_2, input_spec=[x_spec])
# Call this function to trigger program translation.
func.concrete_program
func.concrete_program # noqa: B018
if __name__ == '__main__':
......
......@@ -2228,9 +2228,8 @@ class OpTest(unittest.TestCase):
atol=atol,
equal_nan=False,
err_msg=(
"Operator %s error, %s variable %s (shape: %s, dtype: %s) max gradient diff over limit"
)
% (
"Operator {} error, {} variable {} (shape: {}, dtype: {}) max gradient diff over limit"
).format(
self.op_type,
msg_prefix,
name,
......
......@@ -92,7 +92,7 @@ def make_jacobian(x, y_size, np_dtype):
)
return jacobians
else:
None
pass
def _compute_numerical_jacobian(program, x, y, place, scope, delta):
......@@ -321,10 +321,11 @@ def grad_check(
n = numerical[x_idx][y_idx]
if not np.allclose(a, n, rtol, atol):
msg = (
'Jacobian mismatch for output %s '
'with respect to input %s on %s,\n'
'numerical:%s\nanalytical:%s\n'
% (y[y_idx].name, x[x_idx].name, str(place), n, a)
'Jacobian mismatch for output {} '
'with respect to input {} on {},\n'
'numerical:{}\nanalytical:{}\n'.format(
y[y_idx].name, x[x_idx].name, str(place), n, a
)
)
return fail_test(msg)
return True
......
......@@ -620,9 +620,8 @@ class PrimForwardChecker:
# check static forward
if len(ret) != len(self.eager_desire):
msg = (
"The static comp forward api out tensor nums is different with eager forward api out tensor nums on %s."
'when enable_fw_comp is %s, static comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n'
% (
"The static comp forward api out tensor nums is different with eager forward api out tensor nums on {}."
'when enable_fw_comp is {}, static comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format(
str(self.place),
self.enable_fw_comp,
len(ret),
......@@ -699,9 +698,8 @@ class PrimForwardChecker:
# check jit comp forward
if len(ret) != len(self.eager_desire):
msg = (
"The jit comp forward api out tensor nums is different with eager forward api out tensor nums on %s."
'when enable_fw_comp is %s, jit comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n'
% (
"The jit comp forward api out tensor nums is different with eager forward api out tensor nums on {}."
'when enable_fw_comp is {}, jit comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format(
str(self.place),
self.enable_fw_comp,
len(ret),
......@@ -795,9 +793,8 @@ class PrimForwardChecker:
# check jit comp forward
if len(ret) != len(self.eager_desire):
msg = (
"The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on %s."
'when enable_fw_comp is %s, enable_cinn is %s, jit comp forward api out tensor nums = %s, eager forward api out tensor nums = %s. \n'
% (
"The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on {}."
'when enable_fw_comp is {}, enable_cinn is {}, jit comp forward api out tensor nums = {}, eager forward api out tensor nums = {}. \n'.format(
str(self.place),
self.enable_fw_comp,
core.is_compiled_with_cinn() and self.enable_cinn,
......@@ -869,8 +866,8 @@ class PrimGradChecker(PrimForwardChecker):
def get_output_dict(self, np_outputs, api_outputs, outputs_sig):
assert len(api_outputs) <= len(outputs_sig), (
"forward api outputs length must be the less than or equal to KernelSignature outputs,but receive %s and %s"
) % (len(api_outputs), len(outputs_sig))
"forward api outputs length must be the less than or equal to KernelSignature outputs,but receive {} and {}"
).format(len(api_outputs), len(outputs_sig))
output_dict = {}
for i in range(len(api_outputs)):
output_name = outputs_sig[i]
......@@ -992,9 +989,8 @@ class PrimGradChecker(PrimForwardChecker):
# check static forward
if len(actual_ret) != len(self.eager_desire):
msg = (
"The eager comp grad out tensor nums is different with eager grad out tensor nums on %s."
'when enable_rev_comp is %s, eager comp grad api out tensor nums = %s, eager grad out tensor nums = %s. \n'
% (
"The eager comp grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_rev_comp is {}, eager comp grad api out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
str(self.place),
self.enable_rev_comp,
len(actual_ret),
......@@ -1098,9 +1094,8 @@ class PrimGradChecker(PrimForwardChecker):
# check static grad out
if len(actual_ret) != len(self.eager_desire):
msg = (
"The static comp grad out tensor nums is different with eager grad out tensor nums on %s."
'when enable_fw_comp is %s,enable_rev_comp is %s, static comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n'
% (
"The static comp grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_fw_comp is {},enable_rev_comp is {}, static comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
str(self.place),
self.enable_fw_comp,
self.enable_rev_comp,
......@@ -1215,9 +1210,8 @@ class PrimGradChecker(PrimForwardChecker):
# check jit comp grad out
if len(ret) != len(self.eager_desire):
msg = (
"The jit comp grad out tensor nums is different with eager grad out tensor nums on %s."
'when enable_fw_comp is %s, enable_rev_comp is %s, jit comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n'
% (
"The jit comp grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_fw_comp is {}, enable_rev_comp is {}, jit comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
str(self.place),
self.enable_fw_comp,
self.enable_rev_comp,
......@@ -1346,9 +1340,8 @@ class PrimGradChecker(PrimForwardChecker):
# check jit comp grad out
if len(ret) != len(self.eager_desire):
msg = (
"The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on %s."
'when enable_fw_comp is %s, enable_rev_comp is %s, enable_cinn is %s, jit comp grad out tensor nums = %s, eager grad out tensor nums = %s. \n'
% (
"The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on {}."
'when enable_fw_comp is {}, enable_rev_comp is {}, enable_cinn is {}, jit comp grad out tensor nums = {}, eager grad out tensor nums = {}. \n'.format(
str(self.place),
self.enable_fw_comp,
self.enable_rev_comp,
......
......@@ -171,7 +171,6 @@ class TestBilateralSliceOp(OpTest):
def test_check_output(self):
place = paddle.fluid.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-5)
self.check_output
def test_check_grad(self):
place = paddle.fluid.CUDAPlace(0)
......
......@@ -56,7 +56,7 @@ class TestCallbacks(unittest.TestCase):
def test_earlystopping(self):
paddle.seed(2020)
for dynamic in [True, False]:
paddle.enable_static if not dynamic else None
paddle.enable_static() if not dynamic else None
device = paddle.set_device('cpu')
sample_num = 100
train_dataset = MnistDataset(mode='train', sample_num=sample_num)
......
......@@ -1337,7 +1337,6 @@ class TestDistBase(unittest.TestCase):
"PADDLE_TRAINER_ID": f"{trainer_id}",
"PADDLE_TRAINER_ENDPOINTS": self._ps_endpoints,
"PADDLE_CURRENT_ENDPOINT": ep,
"PADDLE_CURRENT_ENDPOINT": ep,
"PADDLE_DISTRI_BACKEND": "gloo",
"GLOG_v": "2",
}
......
......@@ -90,10 +90,8 @@ class TestDownload(unittest.TestCase):
uncompressed_path = get_path_from_url(url, root_dir='./test_tar')
self.assertTrue(
all(
[
os.path.exists(os.path.join("./test_tar", filepath))
for filepath in uncompressd_res
]
os.path.exists(os.path.join("./test_tar", filepath))
for filepath in uncompressd_res
)
)
......@@ -106,10 +104,8 @@ class TestDownload(unittest.TestCase):
uncompressed_path = get_path_from_url(url, root_dir='./test_zip')
self.assertTrue(
all(
[
os.path.exists(os.path.join("./test_zip", filepath))
for filepath in uncompressd_res
]
os.path.exists(os.path.join("./test_zip", filepath))
for filepath in uncompressd_res
)
)
......
......@@ -103,9 +103,8 @@ class TestFeedData(unittest.TestCase):
self._test_feed_data_shape_mismatch(use_cuda)
self.assertEqual(
str(shape_mismatch_err.exception),
"The fed Variable %r should have dimensions = %r, "
"shape = %r, but received fed shape %r on each device"
% (
"The fed Variable {!r} should have dimensions = {!r}, "
"shape = {!r}, but received fed shape {!r} on each device".format(
'data',
len(in_shape_tuple),
in_shape_tuple,
......@@ -117,8 +116,8 @@ class TestFeedData(unittest.TestCase):
self._test_feed_data_dtype_mismatch(use_cuda)
self.assertEqual(
str(dtype_mismatch_err.exception),
"The data type of fed Variable %r must be 'int64', but "
"received 'float64'" % ('label'),
"The data type of fed Variable {!r} must be 'int64', but "
"received 'float64'".format('label'),
)
def _test_feed_data_dtype_mismatch(self, use_cuda):
......
......@@ -93,9 +93,6 @@ class TestFullOpError(unittest.TestCase):
)
output = paddle.full_like(input_data, 2.0)
def test_input_dtype():
paddle.full_like
self.assertRaises(
TypeError,
paddle.full_like,
......
......@@ -476,8 +476,7 @@ class TestDygraphGradientClipByGlobalNorm(TestDygraphGradientClip):
b = global_norm_clip
self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but received:%f"
% (a, b),
f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}",
)
......@@ -505,8 +504,7 @@ class TestDygraphGradientClipByNorm(TestDygraphGradientClip):
b = np.sqrt(np.sum(np.power(v, 2)))
self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by norm has wrong results, expetcd:%f, but received:%f"
% (a, b),
f"gradient clip by norm has wrong results, expetcd:{a:f}, but received:{b:f}",
)
......@@ -602,8 +600,7 @@ class TestDygraphGradientClipFP16(unittest.TestCase):
b = global_norm_clip
self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-3, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but received:%f"
% (a, b),
f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}",
)
......@@ -647,8 +644,7 @@ class TestDygraphGradientClipFP64(unittest.TestCase):
self.assertTrue(
np.isclose(a=a, b=b, rtol=1e-6, atol=1e-8),
"gradient clip by global norm has wrong results, expetcd:%f, but received:%f"
% (a, b),
f"gradient clip by global norm has wrong results, expetcd:{a:f}, but received:{b:f}",
)
......
......@@ -23,8 +23,7 @@ from paddle import fluid
class VersionTest(unittest.TestCase):
def test_check_output(self):
warnings.warn(
"paddle.__version__: %s, fluid_version.full_version: %s, fluid_version.major: %s, fluid_version.minor: %s, fluid_version.patch: %s, fluid_version.rc: %s."
% (
"paddle.__version__: {}, fluid_version.full_version: {}, fluid_version.major: {}, fluid_version.minor: {}, fluid_version.patch: {}, fluid_version.rc: {}.".format(
paddle.__version__,
fluid_version.full_version,
fluid_version.major,
......
......@@ -187,8 +187,7 @@ class TestCooSoftmax(unittest.TestCase):
)
else:
print(
"`dim(=%s)` must be smaller than `sparse_dim(=%s) + dense_dim(=%s)`"
% (dim, sparse_dim, dense_dim)
f"`dim(={dim})` must be smaller than `sparse_dim(={sparse_dim}) + dense_dim(={dense_dim})`"
)
def check_run(self, dense_shape):
......
......@@ -162,7 +162,6 @@ def create_bf16_test_class(parent):
dout[:, i],
dout[:, i] * self.alpha[i],
)
self.dx
elif self.mode == "element":
self.dx = np.where(self.x[:] > 0, dout[:], dout[:] * self.alpha)
......
......@@ -253,8 +253,7 @@ class TestImperativePTQ(unittest.TestCase):
self.assertTrue(
after_acc_top1 >= self.eval_acc_top1,
msg="The test acc {%f} is less than {%f}."
% (after_acc_top1, self.eval_acc_top1),
msg=f"The test acc {{{after_acc_top1:f}}} is less than {{{self.eval_acc_top1:f}}}.",
)
self.assertTrue(
infer_acc_top1 >= after_acc_top1,
......@@ -322,8 +321,7 @@ class TestImperativePTQfuse(TestImperativePTQ):
# The acc of quantized model should be higher than 0.95.
self.assertTrue(
after_acc_top1 >= self.eval_acc_top1,
msg="The test acc {%f} is less than {%f}."
% (after_acc_top1, self.eval_acc_top1),
msg=f"The test acc {{{after_acc_top1:f}}} is less than {{{self.eval_acc_top1:f}}}.",
)
# Check the saved infer_model.The acc of infer model
# should not be lower than the one of dygraph model.
......
......@@ -220,13 +220,11 @@ class TestImperativeQatAmp(unittest.TestCase):
)
_logger.info(
'fp32_acc_top1: %f, int8_acc_top1: %f'
% (fp32_acc_top1, int8_acc_top1)
f'fp32_acc_top1: {fp32_acc_top1:f}, int8_acc_top1: {int8_acc_top1:f}'
)
self.assertTrue(
int8_acc_top1 > fp32_acc_top1 - 0.01,
msg='fp32_acc_top1: %f, int8_acc_top1: %f'
% (fp32_acc_top1, int8_acc_top1),
msg=f'fp32_acc_top1: {fp32_acc_top1:f}, int8_acc_top1: {int8_acc_top1:f}',
)
input_spec = [
......
......@@ -112,8 +112,7 @@ class FileReader:
if not isinstance(self._args[key], type):
raise TypeError(
"Invalid type of key [%s] in args dict, it should be a %s!"
% (key, type)
f"Invalid type of key [{key}] in args dict, it should be a {type}!"
)
exec(f"self._{key} = self._args[\"{key}\"]")
......@@ -206,8 +205,9 @@ class FileReader:
)
else:
self._logger.info(
"file list in dir [%s] is : %s !"
% (self._dataPath, ', '.join(self._fileList))
"file list in dir [{}] is : {} !".format(
self._dataPath, ', '.join(self._fileList)
)
)
return self._fileList
......
......@@ -63,8 +63,7 @@ class netFileReader(FileReader):
except Exception:
self._logger.warning(
"invalid record [%s] in [%s]. skip it!"
% (line[:-1], fileName)
f"invalid record [{line[:-1]}] in [{fileName}]. skip it!"
)
traceInfo["traceEvents"] = traceEventList
......
......@@ -25,12 +25,10 @@ def strToSecond(strTime):
def getUsefulBuildTimeFile(filename):
os.system(
"grep -Po -- '-o .*' %s | grep ' elapsed' | grep -P -v '0:00.* elapse' > %s/tools/analysis_build_time"
% (filename, root_path)
f"grep -Po -- '-o .*' {filename} | grep ' elapsed' | grep -P -v '0:00.* elapse' > {root_path}/tools/analysis_build_time"
)
os.system(
"grep -v -- '-o .*' %s |grep ' elapse' | grep -P -v '0:00.* elapse' >> %s/tools/analysis_build_time"
% (filename, root_path)
f"grep -v -- '-o .*' {filename} |grep ' elapse' | grep -P -v '0:00.* elapse' >> {root_path}/tools/analysis_build_time"
)
......@@ -48,22 +46,19 @@ def analysisBuildTime():
buildTime = line.split(', ')[1].split('elapsed')[0].strip()
secondTime = strToSecond(buildTime)
os.system(
"echo %s, %s >> %s/tools/tempbuildTime.txt"
% (buildFile, secondTime, root_path)
f"echo {buildFile}, {secondTime} >> {root_path}/tools/tempbuildTime.txt"
)
else:
buildTime = line.split(', ')[1].split('elapsed')[0].strip()
secondTime = strToSecond(buildTime)
if secondTime > 30:
os.system(
"echo %s, %s >> %s/tools/tempbuildTime.txt"
% (line, secondTime, root_path)
f"echo {line}, {secondTime} >> {root_path}/tools/tempbuildTime.txt"
)
except ValueError:
print(line)
os.system(
'sort -n -k 2 -r %s/tools/tempbuildTime.txt > %s/tools/buildTime.txt'
% (root_path, root_path)
f'sort -n -k 2 -r {root_path}/tools/tempbuildTime.txt > {root_path}/tools/buildTime.txt'
)
......
......@@ -83,12 +83,12 @@ def check_speed_result(case_name, develop_data, pr_data, pr_result):
logging.info("------ OP: %s ------" % case_name)
logging.info(
"GPU time change: %s (develop: %.7f -> PR: %.7f)"
% (gpu_time_diff_str, develop_gpu_time, pr_gpu_time)
f"GPU time change: {gpu_time_diff_str} (develop: {develop_gpu_time:.7f} -> PR: {pr_gpu_time:.7f})"
)
logging.info(
"Total time change: %.5f%% (develop: %.7f -> PR: %.7f)"
% (total_time_diff * 100, develop_total_time, pr_total_time)
"Total time change: {:.5f}% (develop: {:.7f} -> PR: {:.7f})".format(
total_time_diff * 100, develop_total_time, pr_total_time
)
)
logging.info("backward: %s" % pr_result.get("backward"))
logging.info("parameters:")
......
......@@ -81,10 +81,7 @@ print_arguments()
# List the commits in mainline branch.
os.chdir(args.git_dir)
ret = subprocess.check_output(
[
'git rev-list --first-parent %s...%s'
% (args.good_commit, args.bad_commit)
],
[f'git rev-list --first-parent {args.good_commit}...{args.bad_commit}'],
shell=True,
)
sys.stdout.write('commits found:\n%s\n' % ret)
......@@ -121,8 +118,9 @@ while True:
# Link error can happen without complete clean up.
cmd = (
'rm -rf * && '
'cmake -DWITH_TESTING=ON %s >> %s && make -j%s >> %s'
% (args.git_dir, args.log_file, args.build_parallel, args.log_file)
'cmake -DWITH_TESTING=ON {} >> {} && make -j{} >> {}'.format(
args.git_dir, args.log_file, args.build_parallel, args.log_file
)
)
sys.stdout.write('cmd: %s\n' % cmd)
try:
......
......@@ -104,7 +104,7 @@ class PRChecker:
def __urlretrieve(self, url, filename):
ix = 1
with_proxy = urllib.request.getproxies()
without_proxy = {'http': '', 'http': ''}
without_proxy = {'http': '', 'https': ''}
while ix < 6:
if ix // 2 == 0:
cur_proxy = urllib.request.ProxyHandler(without_proxy)
......
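The hunk above fixes a small bug: a key that appears twice in a dict literal silently keeps only the last value, so the proxy map never actually contained an `https` entry. A minimal sketch of the effect:

```python
# Only one 'http' key survives in the first literal; the intended mapping
# needs distinct 'http' and 'https' keys.
broken = {'http': '', 'http': ''}
fixed = {'http': '', 'https': ''}

assert broken == {'http': ''}
assert set(fixed) == {'http', 'https'}
```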
......@@ -84,8 +84,7 @@ def analysisFNDAFile(rootPath, test):
notrelated_ut_map_file
):
print(
"make %s and %s successfully"
% (related_ut_map_file, related_ut_map_file)
f"make {related_ut_map_file} and {related_ut_map_file} successfully"
)
else:
print(f"make {related_ut_map_file} and {related_ut_map_file} failed")
......@@ -132,8 +131,7 @@ def analysisFNDAFile(rootPath, test):
clazz_filename not in related_file_list
): # xx.pb.cc in RELATED xx.pb.h not in RELATED
os.system(
'echo %s >> %s'
% (clazz_filename, notrelated_ut_map_file)
f'echo {clazz_filename} >> {notrelated_ut_map_file}'
)
f.close()
......
......@@ -34,8 +34,7 @@ def get_all_paddle_file(rootPath):
def get_all_uts(rootPath):
all_uts_paddle = '%s/build/all_uts_paddle' % rootPath
os.system(
r'cd %s/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > %s'
% (rootPath, all_uts_paddle)
fr'cd {rootPath}/build && ctest -N -V | grep -Ei "Test[ \t]+#" | grep -oEi "\w+$" > {all_uts_paddle}'
)
......
......@@ -30,8 +30,7 @@ def group_case_for_parallel(rootPath):
'exclusive_card_tests_mem0',
]:
os.system(
'cd %s/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/%s --no-check-certificate'
% (rootPath, filename)
f'cd {rootPath}/tools && wget --no-proxy https://paddle-docker-tar.bj.bcebos.com/pre_test_bak/{filename} --no-check-certificate'
)
# get nightly tests
......
......@@ -62,8 +62,7 @@ def insert_pile_to_h_file(rootPath):
os.system(f'echo "#define _PRECISE{func.upper()}_" >> {line}')
os.system('echo "\n#include <cstdio>\n" >> %s' % line)
os.system(
'echo "__attribute__((constructor)) static void calledFirst%s()\n{" >> %s'
% (func, line)
f'echo "__attribute__((constructor)) static void calledFirst{func}()\n{{" >> {line}'
)
os.system(
'echo \' fprintf(stderr,"precise test map fileeee: %%s\\\\n", __FILE__);\n}\' >> %s'
......@@ -118,8 +117,7 @@ def get_h_cu_file(file_path):
ut_path = f"{rootPath}/build/ut_map/{ut}"
if os.path.exists(ut_path):
os.system(
"cat %s/%s | grep 'precise test map fileeee:'| uniq >> %s/build/ut_map/%s/related_%s.txt"
% (dir_path, filename, rootPath, ut, ut)
f"cat {dir_path}/{filename} | grep 'precise test map fileeee:'| uniq >> {rootPath}/build/ut_map/{ut}/related_{ut}.txt"
)
else:
print("%s has failed,no has direcotory" % ut)
......
......@@ -130,10 +130,7 @@ def append_fluid_kernels():
new_content = content.replace(location_str, location_str + append_str)
if new_content == content:
print(
"ERROR: can not find \"%s\" in file \"%s\""
% (location_str, file_name)
)
print(f"ERROR: can not find \"{location_str}\" in file \"{file_name}\"")
return False
with open(file_name, 'w', encoding='utf-8') as f:
......
......@@ -264,11 +264,9 @@ def is_required_match(requirestr, cbtitle='not-specified'):
return None
if all(
[
k in SAMPLE_CODE_TEST_CAPACITY
for k in requires
if k not in ['skip', 'skiptest']
]
k in SAMPLE_CODE_TEST_CAPACITY
for k in requires
if k not in ['skip', 'skiptest']
):
return True
......