Unverified commit 929892c3, authored by cyberslack_lee, committed by GitHub

[CodeStyle][C416][C417] rewrite unnecessary comprehension with function call and use generator instead of map (#52140)

* codestyle c416 c417

* fix error

* fix inc

* unify all C4 rules into one

* fix inc

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent b6ae6a5d
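
For reference, the two ruff rules applied in this commit: C416 flags a comprehension that only copies its iterable (rewrite as `list(...)`/`set(...)`/`dict(...)`), and C417 flags `map()` called with a `lambda` (rewrite as a comprehension or generator expression). A minimal sketch with illustrative data, not taken from the Paddle codebase:

```python
values = [3, 1, 2]  # illustrative data

# C416: a comprehension that merely copies its iterable
copied = [x for x in values]        # flagged by C416
copied = list(values)               # preferred rewrite

# C417: map() with a lambda, rewritten as a comprehension
doubled = list(map(lambda x: x * 2, values))  # flagged by C417
doubled = [x * 2 for x in values]             # preferred rewrite

# Inside all()/any(), a generator expression replaces map() with a lambda
non_negative = all(map(lambda x: x >= 0, values))  # flagged by C417
non_negative = all(x >= 0 for x in values)         # preferred rewrite

assert copied == values and doubled == [6, 2, 4] and non_negative
```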
......@@ -59,7 +59,7 @@ def find_arch_range(min_arch, max_arch):
def find_max_arch(arch):
arch = list(sorted(arch))
arch = sorted(arch)
idx = DEFAULT_ARCH.index(arch[-1])
if idx == len(DEFAULT_ARCH) - 1:
return MAX_ARCH
......
......@@ -33,21 +33,7 @@ select = [
"F401",
# Comprehensions
"C400",
"C401",
"C402",
"C403",
"C404",
"C405",
"C408",
"C409",
"C410",
"C411",
# "C413",
# "C414",
# "C415",
# "C416",
# "C417",
"C4",
# Pyupgrade
"UP001",
......@@ -174,5 +160,7 @@ unfixable = [
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py" = ["UP034"]
# Ignore version check in setup.py
"setup.py" = ["UP036"]
# Ignore unnecessary comprehension in dy2st unittest test_loop
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py" = ["C416"]
# Ignore unnecessary lambda in dy2st unittest test_lambda
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py" = ["PLC3002"]
......@@ -170,8 +170,8 @@ class DistributedSaver:
global_block = dist_main_prog.global_block()
ops = global_block.ops
feed_vars_names = list(map(lambda x: x.name, feed_vars))
fetch_vars_names = list(map(lambda x: x.name, fetch_vars))
feed_vars_names = [x.name for x in feed_vars]
fetch_vars_names = [x.name for x in fetch_vars]
last_idx = -1
for idx, op in enumerate(ops):
......
......@@ -39,7 +39,7 @@ class DistributedTensor:
):
if not (
isinstance(sizes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, sizes))
and all(isinstance(x, int) and x >= 0 for x in sizes)
):
raise ValueError(
"The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format(
......@@ -48,7 +48,7 @@ class DistributedTensor:
)
if not (
isinstance(dims_mapping, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= -1, dims_mapping))
and all(isinstance(x, int) and x >= -1 for x in dims_mapping)
):
raise ValueError(
"The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}".format(
......@@ -57,7 +57,7 @@ class DistributedTensor:
)
if not (
isinstance(processes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, processes))
and all(isinstance(x, int) and x >= 0 for x in processes)
):
raise ValueError(
"The processes must be list or tuple and item in processes must be integer, but got {}".format(
......@@ -66,7 +66,7 @@ class DistributedTensor:
)
if not (
isinstance(topology, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x > 0, topology))
and all(isinstance(x, int) and x > 0 for x in topology)
):
raise ValueError(
"The topology must be list or tuple and item in topology must be non-negative integer, but got {}".format(
......@@ -162,9 +162,9 @@ class DistributedTensor:
len(local_sizes), len(local_offsets)
)
local_end_offsets = list(
map(lambda x: x[0] + x[1], zip(local_offsets, local_sizes))
)
local_end_offsets = [
x[0] + x[1] for x in zip(local_offsets, local_sizes)
]
local_shard = list(zip(local_offsets, local_end_offsets))
return local_shard
......
......@@ -337,7 +337,7 @@ class PlanSpace:
vars = program.global_block().vars
processes = reduce(lambda x, y: x * y, process_mesh_topology)
global_group = [i for i in range(processes)]
global_group = list(range(processes))
global_process_mesh = None
pipeline_process_meshes = None
......
......@@ -1340,15 +1340,13 @@ class Resharder:
if op_input:
op_input_dims_mapping = dist_attr[1]
if all(
map(
lambda x: x,
[
tensor_dims_mapping,
tensor_process_mesh,
op_input_dims_mapping,
op_process_mesh,
],
)
x
for x in [
tensor_dims_mapping,
tensor_process_mesh,
op_input_dims_mapping,
op_process_mesh,
]
):
# judge whether need reshard by dims_mapping
if tensor_dims_mapping != op_input_dims_mapping:
......@@ -1379,15 +1377,13 @@ class Resharder:
else:
op_output_dims_mapping = dist_attr[1]
if all(
map(
lambda x: x,
[
tensor_dims_mapping,
tensor_process_mesh,
op_output_dims_mapping,
op_process_mesh,
],
)
x
for x in [
tensor_dims_mapping,
tensor_process_mesh,
op_output_dims_mapping,
op_process_mesh,
]
):
if tensor_dims_mapping != op_output_dims_mapping:
raise ValueError(
......@@ -1554,7 +1550,7 @@ class Resharder:
i += 1
if i == len(has_used):
has_used = list(map(lambda x: False, has_used))
has_used = [False for x in has_used]
to_send_process = process_list[0]
has_used[0] = True
assert (
......@@ -1744,11 +1740,9 @@ class Resharder:
if isinstance(op_desc, AllGatherOpDesc): # noqa: F401
if var_name not in self.has_allgather.keys():
self.has_allgather[var_name] = []
if not self.has_allgather[
var_name
] or op_desc.group not in list(
map(lambda x: x[0], self.has_allgather[var_name])
):
if not self.has_allgather[var_name] or op_desc.group not in [
x[0] for x in self.has_allgather[var_name]
]:
if op_desc.is_bool:
# for bool data allgather, cast to int64 -> allgather -> cast bool
out_cast = Inserter.insert_cast_op(
......
......@@ -290,7 +290,7 @@ class ParallelTuner:
return self._cached_dims_mapping_candidates[key]
candidates = []
dims_mapping = [-1 for i in range(dims_mapping_len)]
dims_list = [i for i in range(process_mesh_len)]
dims_list = list(range(process_mesh_len))
visited = [False for i in range(process_mesh_len)]
self._generate_dims_mapping_candidates_helper(
dims_mapping, dims_list, 0, visited, candidates
......
......@@ -1631,13 +1631,12 @@ class RuleBasedTuner:
Most of the logic is the same as the update completion in the completer.
"""
world_ranks = ProcessMesh(
[
i
for i in range(
list(
range(
self._cluster.get_num_machines()
* self._cluster._num_devices_per_machine
)
]
)
)
dist_tensors = sub_program_dist_context._dist_tensors_for_program
......@@ -1958,10 +1957,9 @@ class RuleBasedTuner:
self.device_meshes_list.append([])
for device_mesh in device_meshes:
devices = reduce(lambda x, y: x * y, device_mesh)
processes = [
i
for i in range(has_used_devices, has_used_devices + devices)
]
processes = list(
range(has_used_devices, has_used_devices + devices)
)
device_mesh_shape = (
device_mesh
if device_mesh[0] != 1
......
......@@ -117,7 +117,7 @@ class TunableSpace:
{"class_name": v.__class__.__name__, "state": v.get_state()}
for v in self._variables.values()
],
"values": {k: v for (k, v) in self.values.items()},
"values": dict(self.values.items()),
}
@classmethod
......@@ -126,7 +126,7 @@ class TunableSpace:
for v in state["variables"]:
v = _deserialize_tunable_variable(v)
ts._variables[v.name] = v
ts._values = {k: v for (k, v) in state["values"].items()}
ts._values = dict(state["values"].items())
return ts
......
......@@ -115,7 +115,7 @@ class Choice(TunableVariable):
default = bool(default)
else:
self._is_unknown_type = True
self._indices = [i for i in range(len(values))]
self._indices = list(range(len(values)))
self.values = values
if default is not None and default not in values:
......
......@@ -1684,7 +1684,7 @@ def get_standalone_cost_data(distributed_programs):
shape = info[
shape_left_boundary + 1 : shape_right_boundary
].split(",")
shape = list(map(lambda x: int(x.strip()), shape))
shape = [int(x.strip()) for x in shape]
dtype_factor = 1
total_static_input_size += reduce(lambda x, y: x * y, shape)
if op.type == "c_embedding":
......
......@@ -87,9 +87,7 @@ paddlecloud environment.".format(
if started_port is None:
started_port = 6170
ports = [
x for x in range(started_port, started_port + len(selected_devices))
]
ports = list(range(started_port, started_port + len(selected_devices)))
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
......
......@@ -129,9 +129,7 @@ def get_cloud_cluster(
device_count = 1
devices_per_proc = [str(x) for x in range(device_count)]
free_ports = [
x for x in range(start_port, start_port + len(devices_per_proc))
]
free_ports = list(range(start_port, start_port + len(devices_per_proc)))
trainer_endpoints = []
for ip in node_ips:
......
......@@ -382,7 +382,7 @@ class UtilBase:
if paddle.static.io.is_persistable(v)
]
pruned_vars = OrderedDict(pruned_vars)
pruned_vars_name = [name for name in pruned_vars]
pruned_vars_name = list(pruned_vars)
print("persistable vars in pruned program: {}".format(pruned_vars_name))
# feed and fetch op is added in pruned program when pruning, not need to be found in train program
......
......@@ -75,9 +75,7 @@ paddlecloud environment.".format(
if started_port is None:
started_port = 6170
ports = [
x for x in range(started_port, started_port + len(devices_per_proc))
]
ports = list(range(started_port, started_port + len(devices_per_proc)))
trainer_endpoints = []
for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
......
......@@ -338,7 +338,7 @@ class ElasticManager:
ip = endpoints
port = start_port
ports = [x for x in range(port, port + len(devices_per_proc))]
ports = list(range(port, port + len(devices_per_proc)))
endpoint_list.extend(["%s:%d" % (ip, port) for port in ports])
dist_endpoints = ','.join(endpoint_list)
......@@ -360,7 +360,7 @@ class ElasticManager:
self.etcd.cancel_watch(watch)
self.etcd.delete(self.host_path)
hosts = [i for i in self.etcd.get_prefix(self.node_prefix)]
hosts = list(self.etcd.get_prefix(self.node_prefix))
if len(hosts) == 0:
self.etcd.delete_prefix(self.prefix)
......
......@@ -314,9 +314,7 @@ def get_cluster_from_args(args, device_mode, devices_per_proc):
if os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x for x in range(start_port, start_port + len(devices_per_proc))
]
free_ports = list(range(start_port, start_port + len(devices_per_proc)))
trainer_endpoints = []
for ip in node_ips:
......
......@@ -951,7 +951,7 @@ def get_device_proc_info(args):
if args.nproc_per_node is None:
devices_per_proc = [0]
else:
devices_per_proc = [x for x in range(0, args.nproc_per_node)]
devices_per_proc = list(range(0, args.nproc_per_node))
else:
raise AssertionError(
"Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
......@@ -1107,20 +1107,14 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
else:
free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
......@@ -1250,20 +1244,14 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [
x
for x in range(
start_port, start_port + len(node_ranks[node_rank])
)
]
free_ports = list(
range(start_port, start_port + len(node_ranks[node_rank]))
)
else:
free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
......
......@@ -196,7 +196,7 @@ class AscendIRParser:
HcomGroupConfig(
name="hcom_group_0",
nranks=fleet.world_size(),
rank_ids=[x for x in range(fleet.world_size())],
rank_ids=list(range(fleet.world_size())),
)
)
......
......@@ -2422,7 +2422,7 @@ class TransposeGradParser(AscendParserBase):
x_shape = self.op.block.var(self.op.input_arg_names[1]).shape[1:]
out_grad_shape = self.op.block.var(self.op.input_arg_names[0]).shape
assert list(map(lambda x: out_grad_shape[x], perm)) == list(x_shape)
assert [out_grad_shape[x] for x in perm] == list(x_shape)
x_grad = (
core.GEOperatorFactory.create_operator(
......
......@@ -27,7 +27,7 @@ def _obtain_optimizer_parameters_list(optimizer):
for param in group['params']:
parameters_list.append(param)
else:
parameters_list = [param for param in optimizer._parameter_list]
parameters_list = list(optimizer._parameter_list)
return parameters_list
......
......@@ -37,7 +37,7 @@ def _obtain_optimizer_parameters_list(optimizer):
for param in group['params']:
parameters_list.append(param)
else:
parameters_list = [param for param in optimizer._parameter_list]
parameters_list = list(optimizer._parameter_list)
return parameters_list
......
......@@ -337,7 +337,7 @@ class PipelineParallel(MetaParallelBase):
assert len(outputs) == len(output_tensor_grad)
paddle.autograd.backward(
tensors=outputs,
grad_tensors=[t for t in output_tensor_grad],
grad_tensors=list(output_tensor_grad),
)
else:
paddle.autograd.backward(
......
......@@ -438,7 +438,7 @@ class GroupShardedOptimizerStage2(Optimizer):
if self.offload:
self._optim._master_weights = self._master_params
cpu_master_params = [p for p in self._master_params.values()]
cpu_master_params = list(self._master_params.values())
for param in cpu_master_params:
size = param._numel() * align[Type.fp32.value]
remaining = size % alignment[self.offload_device]
......
......@@ -79,12 +79,10 @@ class GroupShardedStage2(nn.Layer):
else sharding_optimizer
)
assert all(
list(
map(
lambda opt: isinstance(opt, GroupShardedOptimizerStage2),
self._sharding_optimizers,
)
)
[
isinstance(opt, GroupShardedOptimizerStage2)
for opt in self._sharding_optimizers
]
), "Please use GroupShardedOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable
......
......@@ -335,7 +335,7 @@ class GroupShardedStage3(nn.Layer):
buffer_size[param.dtype] += param._numel() + p_align
# Create unslice_params'grad
for param in sorted(list(self._unslice_params), key=lambda p: p.name):
for param in sorted(self._unslice_params, key=lambda p: p.name):
if param.dtype not in self._grad_storages.keys():
self._grad_storages[param.dtype] = GradStorage(
buffer_size[param.dtype],
......
......@@ -1445,7 +1445,7 @@ class TheOnePSRuntime(RuntimeBase):
generate_vars = self.context[
"user_defined_strategy"
].trainer_desc_configs["stat_var_names"]
generate_vars = [var for var in generate_vars]
generate_vars = list(generate_vars)
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(sparse_names),
......
......@@ -207,7 +207,7 @@ class ETCDMaster(Master):
while not self.ctx.status.is_done():
self.client.put(path, value.encode('latin-1'))
result = [i for i in self.client.get_prefix(prefix)]
result = list(self.client.get_prefix(prefix))
result = copy.deepcopy(result)
self.ctx.logger.debug("sync peers {}".format(result))
......
......@@ -47,8 +47,8 @@ class PSController(Controller):
else:
host = self.ctx.node.ip
server_endpoints = [s for s in self.ctx.args.servers.split(",")]
trainer_endpoints = [s for s in self.ctx.args.trainers.split(",")]
server_endpoints = list(self.ctx.args.servers.split(","))
trainer_endpoints = list(self.ctx.args.trainers.split(","))
servers = [
s for s in self.ctx.args.servers.split(",") if s.startswith(host)
]
......
......@@ -643,7 +643,7 @@ class DataParallelOptimizationPass(PassBase):
)
# insert dependency op
indice = sorted(list(dep_map.keys()), reverse=True)
indice = sorted(dep_map.keys(), reverse=True)
for i in indice:
for idx, prior_vars, post_vars, op_role in dep_map[i][::-1]:
depend_op = insert_dependencies_for_vars(
......
......@@ -893,7 +893,7 @@ class ShardingPass(PassBase):
)
# insert deps
indice = sorted(list(dep_map.keys()), reverse=True)
indice = sorted(dep_map.keys(), reverse=True)
for i in indice:
for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
depend_op = insert_dependencies_for_vars(
......@@ -1263,7 +1263,7 @@ class ShardingPass(PassBase):
idx += 1
# insert deps
indice = sorted(list(dep_map.keys()), reverse=True)
indice = sorted(dep_map.keys(), reverse=True)
for i in indice:
for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
depend_op = insert_dependencies_for_vars(
......
......@@ -138,7 +138,7 @@ class AutoParalSupplementDepPass(PassBase):
prior_varname = op.output("ParamOut")[0]
# insert deps
indice = sorted(list(deps_map.keys()), reverse=True)
indice = sorted(deps_map.keys(), reverse=True)
for idx in indice:
prior_var = main_block.var(deps_map[idx][0])
post_var = main_block.var(deps_map[idx][1])
......
......@@ -1579,7 +1579,7 @@ class TheOnePSRuntime(RuntimeBase):
generate_vars = self.context[
"user_defined_strategy"
].trainer_desc_configs["stat_var_names"]
generate_vars = [var for var in generate_vars]
generate_vars = list(generate_vars)
remaining_vars = list(
filter(
TheOnePSRuntime.__exclude_vars(sparse_names),
......
......@@ -1698,7 +1698,7 @@ def add_send_op(program, block, _vars):
def get_vars_name_in_block(block):
vars_list = block.vars.keys()
vars_name_list = [var_name for var_name in vars_list]
vars_name_list = list(vars_list)
return vars_name_list
......
......@@ -331,7 +331,7 @@ def _get_subprocess_env_list(nprocs, options):
# get cluster and pod config
if options['backend'] == 'gloo':
devices_per_proc = [x for x in range(0, nprocs)]
devices_per_proc = list(range(0, nprocs))
cluster, pod = get_cluster_from_args(
args, DeviceMode.CPU, devices_per_proc
)
......
......@@ -54,9 +54,9 @@ def get_cluster_from_args(args, selected_gpus):
if args.started_port is not None:
started_port = args.started_port
free_ports = [
x for x in range(started_port, started_port + len(selected_gpus))
]
free_ports = list(
range(started_port, started_port + len(selected_gpus))
)
trainer_endpoints = []
for ip in node_ips:
......
......@@ -16,9 +16,8 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.distributed as dist
import paddle.fluid as fluid
import paddle.fluid.data_feeder as data_feeder
import paddle.framework as framework
from paddle import fluid, framework
from paddle.fluid import data_feeder
paddle.enable_static()
......
......@@ -16,9 +16,8 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle
import paddle.distributed as dist
import paddle.fluid as fluid
import paddle.fluid.data_feeder as data_feeder
import paddle.framework as framework
from paddle import fluid, framework
from paddle.fluid import data_feeder
paddle.enable_static()
......
......@@ -72,7 +72,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch])
x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10))
x = to_variable(x_data)
out = model(x)
......
......@@ -60,7 +60,7 @@ class TestNoSync(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch])
x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data)
out = model(x)
......
......@@ -55,7 +55,7 @@ class TestNoSyncControlFlow(TestNoSync):
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch])
x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data)
out = model(x)
......
......@@ -54,7 +54,7 @@ class TestNoSyncUnusedParam(TestNoSync):
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch])
x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data)
out = model(x)
......
......@@ -282,7 +282,7 @@ class DataReader:
sent_ids = [0 for i in range(sent0_len)] + [
1 for i in range(sent1_len)
]
pos_ids = [i for i in range(sent0_len + sent1_len)]
pos_ids = list(range(sent0_len + sent1_len))
label = 1
yield token_ids, sent_ids, pos_ids, label
......
......@@ -185,9 +185,7 @@ class TestStaticAnalysis(unittest.TestCase):
if wrapper.parent is not None:
self.assertTrue(wrapper in wrapper.parent.children)
children_ast_nodes = [
child for child in gast.iter_child_nodes(wrapper.node)
]
children_ast_nodes = list(gast.iter_child_nodes(wrapper.node))
self.assertEqual(len(wrapper.children), len(children_ast_nodes))
for child in wrapper.children:
self.assertTrue(child.node in children_ast_nodes)
......
......@@ -55,8 +55,7 @@ class PrePostProcessLayer(Layer):
elif cmd == "n": # add layer normalization
self.functors.append(
self.add_sublayer(
"layer_norm_%d"
% len([layer for layer in self.children()]),
"layer_norm_%d" % len(list(self.children())),
paddle.nn.LayerNorm(
normalized_shape=d_model,
weight_attr=fluid.ParamAttr(
......
......@@ -90,7 +90,7 @@ class TestFcFusePass(PassAutoScanTest):
if begin_norm_axis == x_shape_rank - 1 and draw(st.booleans()):
reduce_mean_dim = [-1]
else:
reduce_mean_dim = [i for i in range(x_shape_rank)]
reduce_mean_dim = list(range(x_shape_rank))
reduce_mean_dim = reduce_mean_dim[begin_norm_axis:]
error_test_ratio = draw(st.integers(min_value=1, max_value=10))
if error_test_ratio > 9:
......
......@@ -47,7 +47,7 @@ class TestOneDNNReshapeTransposeMatmulFusePass(PassAutoScanTest):
def generate_input2(attrs):
shape_x = [attrs[3]['batch_size'], attrs[3]['channel'], self.num]
input_volume = reduce(lambda x, y: x * y, shape_x)
matmul_shape = [i for i in attrs[0]['shape']]
matmul_shape = list(attrs[0]['shape'])
if 0 in matmul_shape:
for i in range(len(matmul_shape)):
if matmul_shape[i] == 0:
......
......@@ -89,7 +89,7 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest):
inputs = {}
x_shape_rank = draw(st.integers(min_value=2, max_value=5))
# Generate axis of transpose
trans_axis = [j for j in range(x_shape_rank)]
trans_axis = list(range(x_shape_rank))
for j in range(x_shape_rank - 1):
if draw(st.booleans()):
trans_axis[j], trans_axis[-1] = trans_axis[-1], trans_axis[j]
......
......@@ -48,14 +48,14 @@ def reference_matmul(X, Y, transpose_x=False, transpose_y=False):
elif X.ndim == 2:
X = X.T
else:
dim = [i for i in range(len(X.shape))]
dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim))
if transpose_y:
if Y.ndim == 1:
Y = Y.reshape((Y.size,))
else:
dim = [i for i in range(len(Y.shape))]
dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim))
......
......@@ -64,7 +64,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch])
x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data)
out = model(x)
......
......@@ -60,7 +60,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch])
x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10))
x = to_variable(x_data)
out = model(x)
......
......@@ -104,7 +104,7 @@ class TestAllocContinuousSpace(OpTest):
out[0:length] = input[1].flatten()
inputs.append(out)
coalesce_tensor_var = np.concatenate([input for input in inputs])
coalesce_tensor_var = np.concatenate(list(inputs))
if set_constant:
coalesce_tensor_var = np.ones(len(coalesce_tensor_var)) * constant
outputs = [
......
......@@ -65,7 +65,7 @@ def create_pyobject_test_data(shape=None, seed=None):
np.random.seed(seed)
list_shape = np.random.randint(0, high=100, size=(2)).tolist()
list_data = np.random.random(shape).tolist()
dict_key = [i for i in range(0, shape[0])]
dict_key = list(range(0, shape[0]))
dict_val = np.random.random(shape).tolist()
dict_data = dict(zip(dict_key, dict_val))
return [list_data, dict_data]
......
......@@ -796,12 +796,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
['Filter'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Input']),
no_grad_set={'Input'},
)
else:
self.check_grad(
['Filter'], 'Output', no_grad_set=set(['Input'])
)
self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
def test_check_grad_no_filter(self):
if self.need_check_grad:
......@@ -813,12 +811,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
['Input'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Filter']),
no_grad_set={'Filter'},
)
else:
self.check_grad(
['Input'], 'Output', no_grad_set=set(['Filter'])
)
self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})
def test_check_grad(self):
if self.need_check_grad:
......@@ -827,13 +823,13 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
if core.is_float16_supported(place):
self.check_grad_with_place(
place,
set(['Input', 'Filter']),
{'Input', 'Filter'},
'Output',
max_relative_error=0.02,
)
else:
self.check_grad(
set(['Input', 'Filter']), 'Output', max_relative_error=0.02
{'Input', 'Filter'}, 'Output', max_relative_error=0.02
)
......@@ -980,7 +976,7 @@ class TestCUDNN_BF16(TestConv2DTransposeOp):
['Filter'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Input']),
no_grad_set={'Input'},
user_defined_grads=[numeric_grads],
)
......@@ -992,7 +988,7 @@ class TestCUDNN_BF16(TestConv2DTransposeOp):
['Input'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Filter']),
no_grad_set={'Filter'},
user_defined_grads=[numeric_grads],
)
......
......@@ -37,7 +37,7 @@ class TestCreateParameterError(unittest.TestCase):
def test_attr():
paddle.create_parameter(
[1, 2, 3], np.float32, attr=np.array([i for i in range(6)])
[1, 2, 3], np.float32, attr=np.array(list(range(6)))
)
self.assertRaises(TypeError, test_attr)
......@@ -46,7 +46,7 @@ class TestCreateParameterError(unittest.TestCase):
paddle.create_parameter(
[1, 2, 3],
np.float32,
default_initializer=np.array([i for i in range(6)]),
default_initializer=np.array(list(range(6))),
)
self.assertRaises(TypeError, test_default_initializer)
......
......@@ -69,7 +69,7 @@ class TestCropTensorOp(OpTest):
else:
self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape]
crop_shape = list(self.crop_shape)
for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i]
......@@ -169,7 +169,7 @@ class TestCropTensorOpTensorAttr(OpTest):
self.attrs['shape'] = self.crop_shape
self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape]
crop_shape = list(self.crop_shape)
for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i]
......
......@@ -121,8 +121,8 @@ def operator_equal(a, b):
continue
elif isinstance(v, collections.OrderedDict):
v0 = sorted(list(v.items()), key=lambda x: x[0])
v1 = sorted(list(b.__dict__[k].items()), key=lambda x: x[0])
v0 = sorted(v.items(), key=lambda x: x[0])
v1 = sorted(b.__dict__[k].items(), key=lambda x: x[0])
if v0 != v1:
raise ValueError("In operator_equal not equal:{0}\n".format(k))
......
......@@ -660,7 +660,7 @@ class TestParallelDyGraphRunnerBase:
if paddle.distributed.get_rank() == 0:
new_batch.append(batch[0])
elif paddle.distributed.get_rank() == 1:
new_batch.extend([_ for _ in batch[1:]])
new_batch.extend(list(batch[1:]))
else:
raise NotImplementedError(
"Current TestParallelDyGraphRunnerBase don't support world_size > 2"
......
......@@ -34,7 +34,7 @@ class TestLookupTableFuseOp(unittest.TestCase):
def check_with_place(self, place):
scope = fluid.global_scope()
scope.var("LearningRate").get_tensor().set([0.01], place)
scope.var("Ids").get_tensor().set([i for i in range(100)], place)
scope.var("Ids").get_tensor().set(list(range(100)), place)
init_program = fluid.Program()
......
......@@ -40,7 +40,7 @@ def gru(
for i in range(len(seq_lens)):
seq_starts.append(seq_starts[-1] + seq_lens[i])
sorted_seqs = sorted(
list(range(len(seq_lens))),
range(len(seq_lens)),
key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]),
)
num_batch = seq_lens[sorted_seqs[0]]
......
......@@ -58,7 +58,7 @@ class TestKLDivLossOp(OpTest):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"]))
self.check_grad(['X'], 'Loss', no_grad_set={"Target"})
def initTestCase(self):
self.x_shape = (4, 5, 5)
......
......@@ -58,7 +58,7 @@ class TestKthvalueOp(OpTest):
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad({'X'}, 'Out')
class TestKthvalueOpWithKeepdim(OpTest):
......@@ -85,7 +85,7 @@ class TestKthvalueOpWithKeepdim(OpTest):
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad({'X'}, 'Out')
class TestKthvalueOpKernels(unittest.TestCase):
......
......@@ -115,7 +115,7 @@ class TestLodAppendOpByAttr(OpTest):
lod = [[3, 2, 5]]
# target_offset_lod and target_lod are the same lod info represented
# in offset-based format and length-based format, respectively.
target_offset_lod = [i for i in range(11)]
target_offset_lod = list(range(11))
self.inputs = {'X': (x, lod)}
out_lod = [[3, 2, 5], [1] * 10]
# The `target_lod` attribute is still based on offset
......
......@@ -128,7 +128,7 @@ class TestLookupTableBF16OpWIsSelectedRows(unittest.TestCase):
self.place = core.CPUPlace()
def prepare_w(self):
rows = [a for a in range(self.w_bf16.shape[0])]
rows = list(range(self.w_bf16.shape[0]))
row_numel = self.w_bf16.shape[1]
w_selected_rows = self.scope.var('W').get_selected_rows()
......
......@@ -65,14 +65,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2:
X = X.T
else:
dim = [i for i in range(len(X.shape))]
dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim))
if transpose_Y:
if Y.ndim == 1:
Y = Y.reshape((1, Y.size))
else:
dim = [i for i in range(len(Y.shape))]
dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim))
......
......@@ -33,14 +33,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2:
X = X.T
else:
dim = [i for i in range(len(X.shape))]
dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim))
if transpose_Y:
if Y.ndim == 1:
Y = Y.reshape((Y.size,))
else:
dim = [i for i in range(len(Y.shape))]
dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim))
......
......@@ -78,7 +78,7 @@ class TestModeOp(OpTest):
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad({'X'}, 'Out')
class TestModeOpLastdim(OpTest):
......@@ -103,7 +103,7 @@ class TestModeOpLastdim(OpTest):
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out')
self.check_grad({'X'}, 'Out')
class TestModeOpKernels(unittest.TestCase):
......
......@@ -86,9 +86,7 @@ class TestRegisteredPhiKernels(unittest.TestCase):
def test_registered_phi_kernels(self):
phi_function_kernel_infos = core._get_registered_phi_kernels("function")
registered_kernel_list = [
name for name in phi_function_kernel_infos.keys()
]
registered_kernel_list = list(phi_function_kernel_infos.keys())
forward_kernels = get_all_kernels(
self.forward_ops, registered_kernel_list
)
......
......@@ -193,7 +193,7 @@ class TestSparseGradParamSGDOpBF16(TestSparseSGDOpBF16):
self.grad_height = 10
self.grad_rows = [0, 4, 7]
self.grad_row_numel = 12
self.param_rows = [a for a in range(self.grad_height)]
self.param_rows = list(range(self.grad_height))
def test_sparse_param_grad_sgd(self):
scope = core.Scope()
......@@ -228,7 +228,7 @@ class TestSparseGradParamSGDOpBF16Case2(TestSparseGradParamSGDOpBF16):
self.grad_height = 14
self.grad_rows = [1, 4, 12, 7, 8]
self.grad_row_numel = 16
self.param_rows = [a for a in range(self.grad_height)]
self.param_rows = list(range(self.grad_height))
@OpTestTool.skip_if_not_cpu_bf16()
......
......@@ -23,7 +23,7 @@ import paddle
def common_setup(self, index_num, nshards, shard_id, ignore_value):
self.op_type = 'shard_index'
self.python_api = paddle.tensor.shard_index
x_lod = [[i for i in range(10)]]
x_lod = [list(range(10))]
N = sum(x_lod[0])
x = [np.random.randint(0, index_num - 1) for i in range(N)]
x = np.array(x).astype('int32').reshape([N, 1])
......
......@@ -102,7 +102,7 @@ class TestSmoothL1LossOp2(OpTest):
['Y'],
'Out',
max_relative_error=0.03,
no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']),
no_grad_set={'X', 'InsideWeight', 'OutsideWeight'},
)
def test_check_grad_ingore_y(self):
......@@ -110,7 +110,7 @@ class TestSmoothL1LossOp2(OpTest):
['X'],
'Out',
max_relative_error=0.03,
no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']),
no_grad_set={'Y', 'InsideWeight', 'OutsideWeight'},
)
......
......@@ -274,16 +274,14 @@ class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp):
self.assertEqual(out_t.shape[0], self.height)
np.testing.assert_array_equal(
out_t,
self._get_array([i for i in range(self.height)], self.row_numel)
self._get_array(list(range(self.height)), self.row_numel)
* np.tile(np.array(result).reshape(self.height, 1), self.row_numel),
)
def create_lod_tensor(self, scope, place, var_name):
var = scope.var(var_name)
w_tensor = var.get_tensor()
w_array = self._get_array(
[i for i in range(self.height)], self.row_numel
)
w_array = self._get_array(list(range(self.height)), self.row_numel)
w_tensor.set(w_array, place)
return var
......
......@@ -31,10 +31,10 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
offset = 0
for n in range(batch_size):
gt_num = gt_lod[n]
ids = random.sample([i for i in range(num_prior)], gt_num)
match_indices[n, ids] = [i for i in range(gt_num)]
ids = random.sample(list(range(num_prior)), gt_num)
match_indices[n, ids] = list(range(gt_num))
ret_ids = {i for i in range(num_prior)} - set(ids)
ret_ids = set(range(num_prior)) - set(ids)
l = neg_lod[n]
neg_ids = random.sample(ret_ids, l)
neg_indices[offset : offset + neg_lod[n], :] = (
......
......@@ -122,14 +122,10 @@ cases = {
for _op_type in ['tril', 'triu']:
for _expected, _params in cases.items():
for _Xshape, _diaglist in _params.items():
list(
map(
lambda _diagonal: case_generator(
_op_type, _Xshape, _diagonal, _expected
),
_diaglist,
)
)
[
case_generator(_op_type, _Xshape, _diagonal, _expected)
for _diagonal in _diaglist
]
class TestTrilTriuOpAPI(unittest.TestCase):
......
......@@ -483,14 +483,10 @@ class BertTokenizer(PretrainedTokenizer):
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(
map(
lambda x: 1
if x in [self.sep_token_id, self.cls_token_id]
else 0,
token_ids_0,
)
)
return [
1 if x in [self.sep_token_id, self.cls_token_id] else 0
for x in token_ids_0
]
if token_ids_1 is not None:
return (
......
......@@ -93,7 +93,7 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper):
out[0:length] = input[1].flatten()
inputs.append(out)
coalesce_tensor_var = np.concatenate([input for input in inputs])
coalesce_tensor_var = np.concatenate(list(inputs))
if set_constant:
coalesce_tensor_var = (
np.ones(len(coalesce_tensor_var)) * constant
......
......@@ -39,7 +39,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2:
X = X.T
else:
dim = [i for i in range(len(X.shape))]
dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim))
if transpose_Y:
......@@ -48,7 +48,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif Y.ndim == 2:
Y = Y.T
else:
dim = [i for i in range(len(Y.shape))]
dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim))
......
......@@ -38,14 +38,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2:
X = X.T
else:
dim = [i for i in range(len(X.shape))]
dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim))
if transpose_Y:
if Y.ndim == 1:
Y = Y.reshape((Y.size,))
else:
dim = [i for i in range(len(Y.shape))]
dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim))
Out = np.matmul(X, Y)
......
......@@ -250,7 +250,7 @@ class TestLoDTensorAndSelectedRowsOp(unittest.TestCase):
self.assertEqual(out_t.shape[0], self.height)
np.testing.assert_array_equal(
out_t,
self._get_array([i for i in range(self.height)], self.row_numel)
self._get_array(list(range(self.height)), self.row_numel)
* np.tile(np.array(result).reshape(self.height, 1), self.row_numel),
)
......@@ -281,9 +281,7 @@ class TestLoDTensorAndSelectedRowsOp(unittest.TestCase):
return var
def create_lod_tensor(self, place):
w_array = self._get_array(
[i for i in range(self.height)], self.row_numel
)
w_array = self._get_array(list(range(self.height)), self.row_numel)
return paddle.to_tensor(w_array)
def test_w_is_selected_rows(self):
......
......@@ -71,7 +71,7 @@ def config_callbacks(
class CallbackList:
def __init__(self, callbacks=None):
# copy
self.callbacks = [c for c in callbacks]
self.callbacks = list(callbacks)
self.params = {}
self.model = None
......
......@@ -315,7 +315,7 @@ def rsqrt_orig2prim(op, x):
@REGISTER_ORIG2PRIM('matmul_v2')
def matmul_v2_orig2prim(op, x, y):
def trans(shape):
ret = [i for i in range(len(shape))]
ret = list(range(len(shape)))
ret[-1], ret[-2] = ret[-2], ret[-1]
return ret
......
......@@ -178,9 +178,7 @@ def _append_pserver_ops(
merged_vars = []
merged_ordervars = []
param_vars = [
p for p in config.param_grad_ep_mapping[endpoint]["params"]
]
param_vars = list(config.param_grad_ep_mapping[endpoint]["params"])
for var in param_vars:
name = var.name
......@@ -1066,7 +1064,7 @@ def build_pserver_startup_program_pass(program, p_main_program, config):
def add_geo_optimizer_pass(program, config):
endpoint = config.get_ps_endpoint()
params = [p for p in config.param_grad_ep_mapping[endpoint]["params"]]
params = list(config.param_grad_ep_mapping[endpoint]["params"])
sparse_tablenames = get_sparse_tablenames(
config.get_origin_main_program(), False
......
......@@ -2087,7 +2087,7 @@ def find_op_input_output(program, block, op):
def get_vars_name_in_block(block):
vars_list = block.vars.keys()
vars_name_list = [var_name for var_name in vars_list]
vars_name_list = list(vars_list)
return vars_name_list
......
......@@ -542,7 +542,7 @@ class PSLib(Fleet):
for i in tp.dense_table:
if table_id is not None and table_id != i.table_id:
continue
var_list = [var for var in i.dense_variable_name]
var_list = list(i.dense_variable_name)
skip = False
for var in var_list:
if scope.find_var(var) is None:
......@@ -751,7 +751,7 @@ class PSLib(Fleet):
for i in tp.dense_table:
if table_id is not None and table_id != i.table_id:
continue
table_var_names = [var for var in i.dense_variable_name]
table_var_names = list(i.dense_variable_name)
skip = False
for var in table_var_names:
if scope.find_var(var) is None:
......
......@@ -744,8 +744,8 @@ class DistributedAdam(DistributedOptimizerImplBase):
]
program_configs[program_id] = {
"pull_sparse": [t_index for t_index in sparse_table_index],
"push_sparse": [t_index for t_index in sparse_table_index],
"pull_sparse": list(sparse_table_index),
"push_sparse": list(sparse_table_index),
}
params_grads = prog_id_to_param_grads[program_id]
......
......@@ -941,7 +941,7 @@ class GeneralRoleMaker(RoleMakerBase):
"""
if not self._role_is_generated:
self.generate_role()
input_list = [i for i in input]
input_list = list(input)
ans = self._node_type_comm.all_reduce(input_list, mode)
for i in range(len(ans)):
output[i] = ans[i]
......
......@@ -96,7 +96,7 @@ def check_pruned_program_vars(train_prog, pruned_prog):
if io_utils.is_persistable(v)
]
pruned_vars = OrderedDict(pruned_vars)
pruned_vars_name = [name for name in pruned_vars]
pruned_vars_name = list(pruned_vars)
logger.info(
"persistable vars in pruned program: {}".format(pruned_vars_name)
)
......@@ -497,7 +497,7 @@ def parse_program(program, output_dir):
f.write("\n")
# all vars
all_vars = [v for v in program.list_vars()]
all_vars = list(program.list_vars())
output["all_vars"] = [
{
'name': str(v.name),
......
......@@ -55,7 +55,7 @@ def indexable(x, code=None):
if isinstance(x, Variable):
return x
elif hasattr(x, '__iter__'):
return [i for i in x]
return list(x)
elif hasattr(x, '__len__') and hasattr(
x, '__getitem__'
): # used for customed type and non-iterable type.
......@@ -575,14 +575,14 @@ class VariableTuple:
def convert_enumerate(*args):
has_variable = any(map(lambda x: isinstance(x, Variable), args))
has_variable = any(isinstance(x, Variable) for x in args)
if has_variable:
return VariableTuple(*args)
return enumerate(*args)
def convert_range(*args):
has_variable = any(map(lambda x: isinstance(x, Variable), args))
has_variable = any(isinstance(x, Variable) for x in args)
if has_variable:
if len(args) == 1:
return paddle.arange(0, args[0], 1, paddle.int64)
......
......@@ -1279,7 +1279,7 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor):
assert isinstance(
node, gast.FunctionDef
), "Input node is not function define node"
names = [a for a in node.args.args]
names = list(node.args.args)
names.append(node.args.vararg)
names.append(node.args.kwarg)
names = [i.id for i in names if i is not None]
......@@ -1387,8 +1387,8 @@ class GetterSetterHelper:
"""
def __init__(self, getter_func, setter_func, *name_lists):
name_lists = map(lambda x: [] if x is None else x, name_lists)
name_sets = map(lambda x: set(x), name_lists)
name_lists = ([] if x is None else x for x in name_lists)
name_sets = (set(x) for x in name_lists)
self._union = list(
functools.reduce(lambda x, y: x | y, name_sets, set())
)
......@@ -1412,7 +1412,7 @@ class GetterSetterHelper:
), "the name `{}` not in name union set`{}`.".format(
n, self.name2id.keys()
)
return tuple(map(lambda n: vars[self.name2id[n]], names))
return tuple(vars[self.name2id[n]] for n in names)
def set(self, names, values):
if names is None:
......@@ -1429,7 +1429,7 @@ class GetterSetterHelper:
n, self.name2id.keys()
)
vars = list(vars)
indices = list(map(lambda n: self.name2id[n], names))
indices = [self.name2id[n] for n in names]
for i, v in zip(indices, values):
vars[i] = v
self.setter(vars)
......
......@@ -329,7 +329,7 @@ class While:
if inner_var:
out_vars.append(inner_var)
x_name_list |= set(map(lambda x: x.name, out_vars))
x_name_list |= {x.name for x in out_vars}
# NOTE(dev): cond_var has been contained in Input('Condition'), so
# we remove it from Input('X')
x_name_list -= {self.cond_var.name}
......
......@@ -75,12 +75,11 @@ def _remove_unused_var_nodes(graph):
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(
all_unused_vars = set(
filter(
lambda node: node.node not in all_used_vars, graph.all_var_nodes()
)
}
)
graph.safe_remove_nodes(all_unused_vars)
return graph
......
......@@ -532,13 +532,12 @@ class Quant2Int8MkldnnPass:
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(
all_unused_vars = set(
filter(
lambda node: node.node not in all_used_vars,
graph.all_var_nodes(),
)
}
)
graph.safe_remove_nodes(all_unused_vars)
return graph
......
......@@ -280,11 +280,10 @@ class QuantInt8MkldnnPass:
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(
all_unused_vars = set(
filter(
lambda node: node.node not in all_used_vars,
graph.all_var_nodes(),
)
}
)
graph.safe_remove_nodes(all_unused_vars)
......@@ -1407,13 +1407,12 @@ class QuantizationFreezePass:
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(
all_unused_vars = set(
filter(
lambda node: node.node not in all_used_vars,
graph.all_var_nodes(),
)
}
)
graph.safe_remove_nodes(all_unused_vars)
def _original_var_name(self, var_name):
......@@ -1524,13 +1523,12 @@ class ConvertToInt8Pass:
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(
all_unused_vars = set(
filter(
lambda node: node.node not in all_used_vars,
graph.all_var_nodes(),
)
}
)
graph.safe_remove_nodes(all_unused_vars)
......@@ -3224,13 +3222,12 @@ class QuantWeightPass:
all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = {
n
for n in filter(
all_unused_vars = set(
filter(
lambda node: node.node not in all_used_vars,
graph.all_var_nodes(),
)
}
)
graph.safe_remove_nodes(all_unused_vars)
def _load_var(self, name):
......
......@@ -246,7 +246,7 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase):
if iters == skip_batch_num:
total_samples = 0
infer_start_time = time.time()
images = list(map(lambda x: x[0].reshape(dshape), data))
images = [x[0].reshape(dshape) for x in data]
images = np.array(images).astype('float32')
labels = np.array([x[1] for x in data]).astype('int64')
......
......@@ -214,7 +214,7 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase):
if iters == skip_batch_num:
total_samples = 0
infer_start_time = time.time()
images = list(map(lambda x: x[0].reshape(dshape), data))
images = [x[0].reshape(dshape) for x in data]
images = np.array(images).astype('float32')
labels = np.array([x[1] for x in data]).astype('int64')
......
......@@ -232,7 +232,7 @@ def build_global_view(nop_labels, rhs, n_bcast_dims):
g_labels_sum = ''.join(labels)
g_labels = g_labels_out + g_labels_sum
g_view = list(map(lambda i: build_view(i, g_labels), nop_labels))
g_view = [build_view(i, g_labels) for i in nop_labels]
g_nout = len(g_labels_out)
g_count = count
......@@ -741,7 +741,7 @@ def parse_fake_shape(equation, operands, labels):
list of shape
"""
origin_labels = map(lambda x: x.strip(), equation.split(','))
origin_labels = (x.strip() for x in equation.split(','))
shaped = collections.namedtuple('shaped', ['shape'])
def fake_shape(ori_label, label, op):
......@@ -1047,7 +1047,7 @@ def einsum(equation, *operands):
# To handle broadcasting, we should first know how many dimensions are there
# We need to use that number to generate output labels
# e.g. 1 for ['ij', 'i.', '.k']
n_bcast_dims = max(map(lambda s: s.count('.'), nop_labels))
n_bcast_dims = max(s.count('.') for s in nop_labels)
# Build the data structures for planning. It's helpful to think of all the operands
# broadcasting together from a global view. In this view, dimensions from multiple
......
......@@ -128,9 +128,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
op = stack if use_stack else concat
res = op(input, axis=axis)
sizes = paddle.to_tensor(
np.array(list(map(lambda x: int(x.shape[axis]), input)))
)
sizes = paddle.to_tensor(np.array([int(x.shape[axis]) for x in input]))
return res, sizes
else:
check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
......@@ -335,7 +333,7 @@ def slice(input, axes, starts, ends):
]
elif isinstance(starts, tmp_tensor_type):
tensor_t = starts.numpy(False)
starts = [ele for ele in tensor_t]
starts = list(tensor_t)
infer_flags = [-1 for i in range(len(axes))]
if isinstance(ends, (list, tuple)):
......@@ -345,7 +343,7 @@ def slice(input, axes, starts, ends):
]
elif isinstance(ends, tmp_tensor_type):
tensor_t = ends.numpy(False)
ends = [ele for ele in tensor_t]
ends = list(tensor_t)
infer_flags = [-1 for i in range(len(axes))]
return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
......@@ -2048,12 +2046,10 @@ def split(x, num_or_sections, axis=0, name=None):
len(num_or_sections) <= input_shape[dim]
), 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections)
attrs['sections'] = list(
map(
lambda ele: -1 if isinstance(ele, Variable) else ele,
num_or_sections,
)
)
attrs['sections'] = [
-1 if isinstance(ele, Variable) else ele
for ele in num_or_sections
]
if paddle.utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections
......
......@@ -115,8 +115,8 @@ def _to_summary(var):
else:
# recursively handle all dimensions
if var.shape[0] > 2 * edgeitems:
begin = [x for x in var[:edgeitems]]
end = [x for x in var[(-1 * edgeitems) :]]
begin = list(var[:edgeitems])
end = list(var[(-1 * edgeitems) :])
return np.stack([_to_summary(x) for x in (begin + end)])
else:
return np.stack([_to_summary(x) for x in var])
......
......@@ -313,8 +313,8 @@ def _recursive_assert_same_structure(nest1, nest2, check_types):
keys1, keys2
)
)
nest1_as_sequence = [n for n in _yield_value(nest1)]
nest2_as_sequence = [n for n in _yield_value(nest2)]
nest1_as_sequence = list(_yield_value(nest1))
nest2_as_sequence = list(_yield_value(nest2))
for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence):
_recursive_assert_same_structure(n1, n2, check_types)
......@@ -454,12 +454,7 @@ def convert_shape_to_list(shape):
Convert shape(list, tuple, variable) to list in imperative mode
"""
if isinstance(shape, (list, tuple)):
shape = list(
map(
lambda x: x.item(0) if isinstance(x, Variable) else x,
shape,
)
)
shape = [x.item(0) if isinstance(x, Variable) else x for x in shape]
else:
shape = shape.astype(int).tolist()
return shape
......
This diff is collapsed.
This diff is collapsed.