Unverified commit ca7394cd authored by Infinity_lee, committed by GitHub

[CodeStyle][C403] Unnecessary list comprehension (rewrite as a set comprehension) (#51968)

Parent: cf391b81
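Every hunk below applies the same mechanical rewrite flagged by rule C403 (from the flake8-comprehensions family of checks): a list comprehension wrapped in `set(...)` becomes a set comprehension, which produces an identical set without allocating a throwaway intermediate list. A minimal sketch of the pattern, using made-up parameter names rather than anything from this diff:

```python
# Made-up parameter names, only to illustrate the C403 rewrite.
param_names = ["fc_0.w_0", "fc_0.b_0", "fc_1.w_0"]

# Before: the list comprehension allocates a temporary list that set() then discards.
layers_before = set([name.split('.')[0] for name in param_names])

# After: a set comprehension builds the same set directly.
layers_after = {name.split('.')[0] for name in param_names}

assert layers_before == layers_after == {"fc_0", "fc_1"}
```

Since the result is a set either way, the change is behavior-preserving; it only skips the temporary list and reads more directly.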
......@@ -1060,9 +1060,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
self._trainers_num = trainers_num
self._role = role
self._current_id = current_id
- self._nodes_num = len(
-     set([x.split(':')[0] for x in self._worker_endpoints])
- )
+ self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})
def _collective_env(self):
self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
......@@ -1078,9 +1076,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
self._non_distributed = True
self._worker_endpoints = self._worker_endpoints.split(",")
self._trainers_num = len(self._worker_endpoints)
- self._nodes_num = len(
-     set([x.split(':')[0] for x in self._worker_endpoints])
- )
+ self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})
self._local_rank = os.getenv("PADDLE_RANK_IN_NODE")
self._local_device_ids = os.getenv("PADDLE_LOCAL_DEVICE_IDS")
self._world_device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS")
......@@ -1206,18 +1202,14 @@ class UserDefinedRoleMaker(PaddleCloudRoleMaker):
self._cur_endpoint = self._worker_endpoints[self._current_id]
elif self._role == Role.SERVER:
self._cur_endpoint = self._server_endpoints[self._current_id]
- self._nodes_num = len(
-     set([x.split(':')[0] for x in self._worker_endpoints])
- )
+ self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})
def _user_defined_collective_env(self):
self._worker_endpoints = self._kwargs.get("worker_endpoints")
self._current_id = self._kwargs.get("current_id")
self._trainers_num = len(self._worker_endpoints)
self._training_role = Role.WORKER
- self._nodes_num = len(
-     set([x.split(':')[0] for x in self._worker_endpoints])
- )
+ self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})
def _generate_role(self):
"""
......
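The `_nodes_num` hunks above all compute the same thing: the number of distinct hosts among the worker endpoints, obtained by stripping the port from each `ip:port` string and deduplicating. A small sketch with hypothetical endpoint values (not taken from the diff):

```python
# Hypothetical endpoints: two trainer processes on 192.168.1.10, one on 192.168.1.11.
worker_endpoints = ["192.168.1.10:6170", "192.168.1.10:6171", "192.168.1.11:6170"]

# Drop the port, deduplicate the hosts, and count the distinct machines.
nodes_num = len({x.split(':')[0] for x in worker_endpoints})
assert nodes_num == 2
```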
......@@ -176,7 +176,7 @@ class DygraphShardingOptimizer:
# NOTE in dygraph mode, the only different between step and minimize is that minimize
# allow user to customize the parameters for updating on each step
- input_param_names = set([param.name for param in parameters])
+ input_param_names = {param.name for param in parameters}
parameters = list(
filter(
lambda x: x.name in input_param_names,
......
......@@ -142,13 +142,11 @@ class FP16Utils:
# the grad checking should take the all and only param in the current shard
to_check_param = set(reversed_x_paramname)
should_check_param = set(shard.global_params).intersection(
-     set(
-         [
-             param
-             for param, worker_idx in shard.global_param2device.items()
-             if worker_idx == shard.worker_idx
-         ]
-     )
+     {
+         param
+         for param, worker_idx in shard.global_param2device.items()
+         if worker_idx == shard.worker_idx
+     }
)
assert (
to_check_param == should_check_param
......
......@@ -134,13 +134,11 @@ class GradientClipHelper:
# the grad sum here should take the all and only param in the current shard
to_check_param = set(reversed_x_paramname)
should_check_param = set(shard.global_params).intersection(
-     set(
-         [
-             param
-             for param, worker_idx in shard.global_param2device.items()
-             if worker_idx == shard.worker_idx
-         ]
-     )
+     {
+         param
+         for param, worker_idx in shard.global_param2device.items()
+         if worker_idx == shard.worker_idx
+     }
)
assert (
to_check_param == should_check_param
......
......@@ -35,7 +35,7 @@ class Shard:
def setup(self, params_grads, worker_idx, worker_num):
# param names of all devices
- self.global_params = set([x[0].name for x in params_grads])
+ self.global_params = {x[0].name for x in params_grads}
# _param(str) -> device_id(int)
self.worker_idx = worker_idx
self.worker_num = worker_num
......
......@@ -907,7 +907,7 @@ class ShardingOptimizer(MetaOptimizerBase):
def _build_shard(self, params_grads, shard_rank, shard_size):
# step 2: split params
- self._params = set([x[0].name for x in params_grads])
+ self._params = {x[0].name for x in params_grads}
self._shard.setup(params_grads, shard_rank, shard_size)
# step 3: get broadcast vars
......
......@@ -45,7 +45,7 @@ def process_args(ctx):
def collective_compatible(ctx):
if 'PADDLE_TRAINER_ENDPOINTS' in ctx.envs:
eps = ctx.envs['PADDLE_TRAINER_ENDPOINTS'].split(',')
- hosts = set([h.split(':')[0] for h in eps])
+ hosts = {h.split(':')[0] for h in eps}
ctx.args.master = eps[0] if ':' in eps[0] else '{}:6768'.format(eps[0])
ctx.args.nnodes = len(hosts)
ctx.logger.info(
......@@ -54,7 +54,7 @@ def collective_compatible(ctx):
if 'DISTRIBUTED_TRAINER_ENDPOINTS' in ctx.envs:
eps = ctx.envs['DISTRIBUTED_TRAINER_ENDPOINTS'].split(',')
- hosts = set([h.split(':')[0] for h in eps])
+ hosts = {h.split(':')[0] for h in eps}
ctx.args.master = eps[0]
ctx.args.nnodes = len(hosts)
ctx.logger.info(
......
......@@ -1116,7 +1116,7 @@ def init_parallel_env():
paddle.distributed.barrier(group=group)
return group
- node_num = set([i.split(":")[0] for i in parallel_env.trainer_endpoints])
+ node_num = {i.split(":")[0] for i in parallel_env.trainer_endpoints}
# 3: init gloo context (step 1: httpsever start)
init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
if is_cpu_only or init_gloo or backend == "heter":
......
......@@ -208,12 +208,10 @@ class DataParallelOptimizationPass(PassBase):
def _all_dp_groups_same_degree(self):
return (
len(
-     set(
-         [
-             len(group.ranks)
-             for group in self._group_to_grad_name_map.keys()
-         ]
-     )
+     {
+         len(group.ranks)
+         for group in self._group_to_grad_name_map.keys()
+     }
)
== 1
)
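The `_all_dp_groups_same_degree` hunk uses a common set idiom: collapsing the group sizes into a set leaves exactly one element if and only if every data-parallel group has the same degree. A hedged sketch with stand-in rank lists (the real code iterates `self._group_to_grad_name_map.keys()`):

```python
# Stand-in for the ranks of three data-parallel groups.
group_ranks = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]

# All groups share one degree exactly when the set of their sizes has a single element.
same_degree = len({len(ranks) for ranks in group_ranks}) == 1
assert same_degree  # every group here has 4 ranks
```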
......@@ -430,7 +428,7 @@ class DataParallelOptimizationPass(PassBase):
def op_depend_on_group(op, group):
vars_ = set(op.input_arg_names + op.output_arg_names)
- grad_names = set([grad.name for grad in group.gradients])
+ grad_names = {grad.name for grad in group.gradients}
return len(vars_.intersection(grad_names)) > 0
for i, op in enumerate(ops):
......
......@@ -969,7 +969,7 @@ class ShardingPass(PassBase):
def op_depend_on_group(op, group):
vars_ = set(op.input_arg_names + op.output_arg_names)
- var_names = set([var.name for var in group.vars])
+ var_names = {var.name for var in group.vars}
return len(vars_.intersection(var_names)) > 0
# analyze groups
......
......@@ -510,7 +510,7 @@ class SingleProcessMultiThread(GradAllReduce):
def _transpile_startup_program(self):
nodes_num = 0
if len(self.endpoints) > 1:
- nodes_num = len(set([x.split(':')[0] for x in self.endpoints]))
+ nodes_num = len({x.split(':')[0] for x in self.endpoints})
# diffent ip num is multi node
if nodes_num > 1:
self.nranks = nodes_num
......
......@@ -98,7 +98,7 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
self, model, inputs, gt_out, feed=None, fetch=None
):
- expected_persistable_vars = set([p.name for p in model.parameters()])
+ expected_persistable_vars = {p.name for p in model.parameters()}
infer_model_prefix = os.path.join(
self.temp_dir.name, "test_dy2stat_inference/model"
......
......@@ -103,9 +103,9 @@ class TestBackward(unittest.TestCase):
params_grads = fluid.backward.append_backward(
loss, parameter_list, no_grad_set
)
- params_names = set(
-     [param_var.name for (param_var, grad_var) in params_grads]
- )
+ params_names = {
+     param_var.name for (param_var, grad_var) in params_grads
+ }
self.assertSetEqual(params_names, self.net.params_names)
return params_grads
......
......@@ -34,7 +34,7 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
ids = random.sample([i for i in range(num_prior)], gt_num)
match_indices[n, ids] = [i for i in range(gt_num)]
- ret_ids = set([i for i in range(num_prior)]) - set(ids)
+ ret_ids = {i for i in range(num_prior)} - set(ids)
l = neg_lod[n]
neg_ids = random.sample(ret_ids, l)
neg_indices[offset : offset + neg_lod[n], :] = (
......
......@@ -462,7 +462,7 @@ def squeeze2_composite(x, axis):
if len(axis) == 0:
dims = set(range(rank))
else:
- dims = set([ax % rank for ax in axis])
+ dims = {ax % rank for ax in axis}
new_shape = []
for d, s in enumerate(x.shape):
if not (s == 1 and (d in dims)):
......
......@@ -1321,9 +1321,9 @@ class Optimizer:
def _get_no_grad_set(self, loss, no_grad_set=None):
no_grad_set = _get_no_grad_set_name(no_grad_set)
parameters = loss.block.program.global_block().all_parameters()
- param_no_trainable = set(
-     [param.name for param in parameters if param.stop_gradient is True]
- )
+ param_no_trainable = {
+     param.name for param in parameters if param.stop_gradient is True
+ }
# If the parameter is no trainable, it should not have a gradient.
no_grad_set.update(param_no_trainable)
......
......@@ -1498,7 +1498,7 @@ def load(program, model_path, executor=None, var_list=None):
"var_list is required when loading model file saved with [ save_params, save_persistables, save_vars ]"
)
program_var_list = program.list_vars()
- program_var_name_set = set([var.name for var in program_var_list])
+ program_var_name_set = {var.name for var in program_var_list}
# check all the variable inlcuded in program
for var in var_list:
......
......@@ -1277,7 +1277,7 @@ def parse_op_name_from(sources):
pattern = re.compile(r'PD_BUILD_OP\(([^,\)]+)\)')
content = re.sub(r'\s|\t|\n', '', content)
op_name = pattern.findall(content)
- op_name = set([re.sub('_grad', '', name) for name in op_name])
+ op_name = {re.sub('_grad', '', name) for name in op_name}
return op_name
......
......@@ -30,7 +30,7 @@ def generate_spec(filename):
def read_spec(filename):
with open(filename, 'r') as f:
- return set([line.strip() for line in f.readlines()])
+ return {line.strip() for line in f.readlines()}
def get_spec_diff(dev_filename, pr_filename):
......