Unverified commit 929892c3 authored by cyberslack_lee, committed by GitHub

[CodeStyle][C416][C417] rewrite unnecessary comprehension with function call and use generator instead of map (#52140)

* codestyle c416 c417

* fix error

* fix inc

* unify all C4 rules into one

* fix inc

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent b6ae6a5d
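For context, a minimal self-contained sketch of the before/after patterns these rules cover (illustrative only; the Var class and the feed_vars/sizes names are made up for the example, not taken from the Paddle sources):

class Var:
    def __init__(self, name):
        self.name = name

feed_vars = [Var("x"), Var("y")]
sizes = [2, 3, 4]

# C417: replace map(lambda ...) with a comprehension or generator expression.
names_before = list(map(lambda v: v.name, feed_vars))
names_after = [v.name for v in feed_vars]
assert names_before == names_after

ok_before = all(map(lambda s: isinstance(s, int) and s >= 0, sizes))
ok_after = all(isinstance(s, int) and s >= 0 for s in sizes)
assert ok_before == ok_after

# C416: drop a comprehension that only copies its iterable.
indices_before = [i for i in range(5)]
indices_after = list(range(5))
assert indices_before == indices_after

The rewritten forms avoid a lambda call per element and match what ruff's flake8-comprehensions (C4) rules suggest; the diff below applies the same transformations across the codebase and collapses the individually enabled C400..C411 codes into the single "C4" prefix in pyproject.toml.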
...@@ -59,7 +59,7 @@ def find_arch_range(min_arch, max_arch): ...@@ -59,7 +59,7 @@ def find_arch_range(min_arch, max_arch):
def find_max_arch(arch): def find_max_arch(arch):
arch = list(sorted(arch)) arch = sorted(arch)
idx = DEFAULT_ARCH.index(arch[-1]) idx = DEFAULT_ARCH.index(arch[-1])
if idx == len(DEFAULT_ARCH) - 1: if idx == len(DEFAULT_ARCH) - 1:
return MAX_ARCH return MAX_ARCH
......
...@@ -33,21 +33,7 @@ select = [ ...@@ -33,21 +33,7 @@ select = [
"F401", "F401",
# Comprehensions # Comprehensions
"C400", "C4",
"C401",
"C402",
"C403",
"C404",
"C405",
"C408",
"C409",
"C410",
"C411",
# "C413",
# "C414",
# "C415",
# "C416",
# "C417",
# Pyupgrade # Pyupgrade
"UP001", "UP001",
...@@ -174,5 +160,7 @@ unfixable = [ ...@@ -174,5 +160,7 @@ unfixable = [
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py" = ["UP034"] "python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py" = ["UP034"]
# Ignore version check in setup.py # Ignore version check in setup.py
"setup.py" = ["UP036"] "setup.py" = ["UP036"]
# Ignore unnecessary comprehension in dy2st unittest test_loop
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py" = ["C416"]
# Ignore unnecessary lambda in dy2st unittest test_lambda # Ignore unnecessary lambda in dy2st unittest test_lambda
"python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py" = ["PLC3002"] "python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py" = ["PLC3002"]
...@@ -170,8 +170,8 @@ class DistributedSaver: ...@@ -170,8 +170,8 @@ class DistributedSaver:
global_block = dist_main_prog.global_block() global_block = dist_main_prog.global_block()
ops = global_block.ops ops = global_block.ops
feed_vars_names = list(map(lambda x: x.name, feed_vars)) feed_vars_names = [x.name for x in feed_vars]
fetch_vars_names = list(map(lambda x: x.name, fetch_vars)) fetch_vars_names = [x.name for x in fetch_vars]
last_idx = -1 last_idx = -1
for idx, op in enumerate(ops): for idx, op in enumerate(ops):
......
...@@ -39,7 +39,7 @@ class DistributedTensor: ...@@ -39,7 +39,7 @@ class DistributedTensor:
): ):
if not ( if not (
isinstance(sizes, (list, tuple)) isinstance(sizes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, sizes)) and all(isinstance(x, int) and x >= 0 for x in sizes)
): ):
raise ValueError( raise ValueError(
"The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format( "The sizes must be list or tuple and item in sizes must be non-negative integer, but got {}".format(
...@@ -48,7 +48,7 @@ class DistributedTensor: ...@@ -48,7 +48,7 @@ class DistributedTensor:
) )
if not ( if not (
isinstance(dims_mapping, (list, tuple)) isinstance(dims_mapping, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= -1, dims_mapping)) and all(isinstance(x, int) and x >= -1 for x in dims_mapping)
): ):
raise ValueError( raise ValueError(
"The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}".format( "The dims_mapping must be list or tuple and item in dims_mapping must >= -1, but got {}".format(
...@@ -57,7 +57,7 @@ class DistributedTensor: ...@@ -57,7 +57,7 @@ class DistributedTensor:
) )
if not ( if not (
isinstance(processes, (list, tuple)) isinstance(processes, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x >= 0, processes)) and all(isinstance(x, int) and x >= 0 for x in processes)
): ):
raise ValueError( raise ValueError(
"The processes must be list or tuple and item in processes must be integer, but got {}".format( "The processes must be list or tuple and item in processes must be integer, but got {}".format(
...@@ -66,7 +66,7 @@ class DistributedTensor: ...@@ -66,7 +66,7 @@ class DistributedTensor:
) )
if not ( if not (
isinstance(topology, (list, tuple)) isinstance(topology, (list, tuple))
and all(map(lambda x: isinstance(x, int) and x > 0, topology)) and all(isinstance(x, int) and x > 0 for x in topology)
): ):
raise ValueError( raise ValueError(
"The topology must be list or tuple and item in topology must be non-negative integer, but got {}".format( "The topology must be list or tuple and item in topology must be non-negative integer, but got {}".format(
...@@ -162,9 +162,9 @@ class DistributedTensor: ...@@ -162,9 +162,9 @@ class DistributedTensor:
len(local_sizes), len(local_offsets) len(local_sizes), len(local_offsets)
) )
local_end_offsets = list( local_end_offsets = [
map(lambda x: x[0] + x[1], zip(local_offsets, local_sizes)) x[0] + x[1] for x in zip(local_offsets, local_sizes)
) ]
local_shard = list(zip(local_offsets, local_end_offsets)) local_shard = list(zip(local_offsets, local_end_offsets))
return local_shard return local_shard
......
...@@ -337,7 +337,7 @@ class PlanSpace: ...@@ -337,7 +337,7 @@ class PlanSpace:
vars = program.global_block().vars vars = program.global_block().vars
processes = reduce(lambda x, y: x * y, process_mesh_topology) processes = reduce(lambda x, y: x * y, process_mesh_topology)
global_group = [i for i in range(processes)] global_group = list(range(processes))
global_process_mesh = None global_process_mesh = None
pipeline_process_meshes = None pipeline_process_meshes = None
......
...@@ -1340,15 +1340,13 @@ class Resharder: ...@@ -1340,15 +1340,13 @@ class Resharder:
if op_input: if op_input:
op_input_dims_mapping = dist_attr[1] op_input_dims_mapping = dist_attr[1]
if all( if all(
map( x
lambda x: x, for x in [
[
tensor_dims_mapping, tensor_dims_mapping,
tensor_process_mesh, tensor_process_mesh,
op_input_dims_mapping, op_input_dims_mapping,
op_process_mesh, op_process_mesh,
], ]
)
): ):
# judge whether need reshard by dims_mapping # judge whether need reshard by dims_mapping
if tensor_dims_mapping != op_input_dims_mapping: if tensor_dims_mapping != op_input_dims_mapping:
...@@ -1379,15 +1377,13 @@ class Resharder: ...@@ -1379,15 +1377,13 @@ class Resharder:
else: else:
op_output_dims_mapping = dist_attr[1] op_output_dims_mapping = dist_attr[1]
if all( if all(
map( x
lambda x: x, for x in [
[
tensor_dims_mapping, tensor_dims_mapping,
tensor_process_mesh, tensor_process_mesh,
op_output_dims_mapping, op_output_dims_mapping,
op_process_mesh, op_process_mesh,
], ]
)
): ):
if tensor_dims_mapping != op_output_dims_mapping: if tensor_dims_mapping != op_output_dims_mapping:
raise ValueError( raise ValueError(
...@@ -1554,7 +1550,7 @@ class Resharder: ...@@ -1554,7 +1550,7 @@ class Resharder:
i += 1 i += 1
if i == len(has_used): if i == len(has_used):
has_used = list(map(lambda x: False, has_used)) has_used = [False for x in has_used]
to_send_process = process_list[0] to_send_process = process_list[0]
has_used[0] = True has_used[0] = True
assert ( assert (
...@@ -1744,11 +1740,9 @@ class Resharder: ...@@ -1744,11 +1740,9 @@ class Resharder:
if isinstance(op_desc, AllGatherOpDesc): # noqa: F401 if isinstance(op_desc, AllGatherOpDesc): # noqa: F401
if var_name not in self.has_allgather.keys(): if var_name not in self.has_allgather.keys():
self.has_allgather[var_name] = [] self.has_allgather[var_name] = []
if not self.has_allgather[ if not self.has_allgather[var_name] or op_desc.group not in [
var_name x[0] for x in self.has_allgather[var_name]
] or op_desc.group not in list( ]:
map(lambda x: x[0], self.has_allgather[var_name])
):
if op_desc.is_bool: if op_desc.is_bool:
# for bool data allgather, cast to int64 -> allgather -> cast bool # for bool data allgather, cast to int64 -> allgather -> cast bool
out_cast = Inserter.insert_cast_op( out_cast = Inserter.insert_cast_op(
......
...@@ -290,7 +290,7 @@ class ParallelTuner: ...@@ -290,7 +290,7 @@ class ParallelTuner:
return self._cached_dims_mapping_candidates[key] return self._cached_dims_mapping_candidates[key]
candidates = [] candidates = []
dims_mapping = [-1 for i in range(dims_mapping_len)] dims_mapping = [-1 for i in range(dims_mapping_len)]
dims_list = [i for i in range(process_mesh_len)] dims_list = list(range(process_mesh_len))
visited = [False for i in range(process_mesh_len)] visited = [False for i in range(process_mesh_len)]
self._generate_dims_mapping_candidates_helper( self._generate_dims_mapping_candidates_helper(
dims_mapping, dims_list, 0, visited, candidates dims_mapping, dims_list, 0, visited, candidates
......
...@@ -1631,13 +1631,12 @@ class RuleBasedTuner: ...@@ -1631,13 +1631,12 @@ class RuleBasedTuner:
Most of the logic is the same as the update completion in the completer. Most of the logic is the same as the update completion in the completer.
""" """
world_ranks = ProcessMesh( world_ranks = ProcessMesh(
[ list(
i range(
for i in range(
self._cluster.get_num_machines() self._cluster.get_num_machines()
* self._cluster._num_devices_per_machine * self._cluster._num_devices_per_machine
) )
] )
) )
dist_tensors = sub_program_dist_context._dist_tensors_for_program dist_tensors = sub_program_dist_context._dist_tensors_for_program
...@@ -1958,10 +1957,9 @@ class RuleBasedTuner: ...@@ -1958,10 +1957,9 @@ class RuleBasedTuner:
self.device_meshes_list.append([]) self.device_meshes_list.append([])
for device_mesh in device_meshes: for device_mesh in device_meshes:
devices = reduce(lambda x, y: x * y, device_mesh) devices = reduce(lambda x, y: x * y, device_mesh)
processes = [ processes = list(
i range(has_used_devices, has_used_devices + devices)
for i in range(has_used_devices, has_used_devices + devices) )
]
device_mesh_shape = ( device_mesh_shape = (
device_mesh device_mesh
if device_mesh[0] != 1 if device_mesh[0] != 1
......
...@@ -117,7 +117,7 @@ class TunableSpace: ...@@ -117,7 +117,7 @@ class TunableSpace:
{"class_name": v.__class__.__name__, "state": v.get_state()} {"class_name": v.__class__.__name__, "state": v.get_state()}
for v in self._variables.values() for v in self._variables.values()
], ],
"values": {k: v for (k, v) in self.values.items()}, "values": dict(self.values.items()),
} }
@classmethod @classmethod
...@@ -126,7 +126,7 @@ class TunableSpace: ...@@ -126,7 +126,7 @@ class TunableSpace:
for v in state["variables"]: for v in state["variables"]:
v = _deserialize_tunable_variable(v) v = _deserialize_tunable_variable(v)
ts._variables[v.name] = v ts._variables[v.name] = v
ts._values = {k: v for (k, v) in state["values"].items()} ts._values = dict(state["values"].items())
return ts return ts
......
...@@ -115,7 +115,7 @@ class Choice(TunableVariable): ...@@ -115,7 +115,7 @@ class Choice(TunableVariable):
default = bool(default) default = bool(default)
else: else:
self._is_unknown_type = True self._is_unknown_type = True
self._indices = [i for i in range(len(values))] self._indices = list(range(len(values)))
self.values = values self.values = values
if default is not None and default not in values: if default is not None and default not in values:
......
...@@ -1684,7 +1684,7 @@ def get_standalone_cost_data(distributed_programs): ...@@ -1684,7 +1684,7 @@ def get_standalone_cost_data(distributed_programs):
shape = info[ shape = info[
shape_left_boundary + 1 : shape_right_boundary shape_left_boundary + 1 : shape_right_boundary
].split(",") ].split(",")
shape = list(map(lambda x: int(x.strip()), shape)) shape = [int(x.strip()) for x in shape]
dtype_factor = 1 dtype_factor = 1
total_static_input_size += reduce(lambda x, y: x * y, shape) total_static_input_size += reduce(lambda x, y: x * y, shape)
if op.type == "c_embedding": if op.type == "c_embedding":
......
...@@ -87,9 +87,7 @@ paddlecloud environment.".format( ...@@ -87,9 +87,7 @@ paddlecloud environment.".format(
if started_port is None: if started_port is None:
started_port = 6170 started_port = 6170
ports = [ ports = list(range(started_port, started_port + len(selected_devices)))
x for x in range(started_port, started_port + len(selected_devices))
]
trainer_endpoints = [] trainer_endpoints = []
for ip in node_ips: for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports]) trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
......
...@@ -129,9 +129,7 @@ def get_cloud_cluster( ...@@ -129,9 +129,7 @@ def get_cloud_cluster(
device_count = 1 device_count = 1
devices_per_proc = [str(x) for x in range(device_count)] devices_per_proc = [str(x) for x in range(device_count)]
free_ports = [ free_ports = list(range(start_port, start_port + len(devices_per_proc)))
x for x in range(start_port, start_port + len(devices_per_proc))
]
trainer_endpoints = [] trainer_endpoints = []
for ip in node_ips: for ip in node_ips:
......
...@@ -382,7 +382,7 @@ class UtilBase: ...@@ -382,7 +382,7 @@ class UtilBase:
if paddle.static.io.is_persistable(v) if paddle.static.io.is_persistable(v)
] ]
pruned_vars = OrderedDict(pruned_vars) pruned_vars = OrderedDict(pruned_vars)
pruned_vars_name = [name for name in pruned_vars] pruned_vars_name = list(pruned_vars)
print("persistable vars in pruned program: {}".format(pruned_vars_name)) print("persistable vars in pruned program: {}".format(pruned_vars_name))
# feed and fetch op is added in pruned program when pruning, not need to be found in train program # feed and fetch op is added in pruned program when pruning, not need to be found in train program
......
...@@ -75,9 +75,7 @@ paddlecloud environment.".format( ...@@ -75,9 +75,7 @@ paddlecloud environment.".format(
if started_port is None: if started_port is None:
started_port = 6170 started_port = 6170
ports = [ ports = list(range(started_port, started_port + len(devices_per_proc)))
x for x in range(started_port, started_port + len(devices_per_proc))
]
trainer_endpoints = [] trainer_endpoints = []
for ip in node_ips: for ip in node_ips:
trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports]) trainer_endpoints.append(["%s:%d" % (ip, port) for port in ports])
......
...@@ -338,7 +338,7 @@ class ElasticManager: ...@@ -338,7 +338,7 @@ class ElasticManager:
ip = endpoints ip = endpoints
port = start_port port = start_port
ports = [x for x in range(port, port + len(devices_per_proc))] ports = list(range(port, port + len(devices_per_proc)))
endpoint_list.extend(["%s:%d" % (ip, port) for port in ports]) endpoint_list.extend(["%s:%d" % (ip, port) for port in ports])
dist_endpoints = ','.join(endpoint_list) dist_endpoints = ','.join(endpoint_list)
...@@ -360,7 +360,7 @@ class ElasticManager: ...@@ -360,7 +360,7 @@ class ElasticManager:
self.etcd.cancel_watch(watch) self.etcd.cancel_watch(watch)
self.etcd.delete(self.host_path) self.etcd.delete(self.host_path)
hosts = [i for i in self.etcd.get_prefix(self.node_prefix)] hosts = list(self.etcd.get_prefix(self.node_prefix))
if len(hosts) == 0: if len(hosts) == 0:
self.etcd.delete_prefix(self.prefix) self.etcd.delete_prefix(self.prefix)
......
...@@ -314,9 +314,7 @@ def get_cluster_from_args(args, device_mode, devices_per_proc): ...@@ -314,9 +314,7 @@ def get_cluster_from_args(args, device_mode, devices_per_proc):
if os.environ.get('FLAGS_START_PORT') is not None: if os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT')) start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [ free_ports = list(range(start_port, start_port + len(devices_per_proc)))
x for x in range(start_port, start_port + len(devices_per_proc))
]
trainer_endpoints = [] trainer_endpoints = []
for ip in node_ips: for ip in node_ips:
......
...@@ -951,7 +951,7 @@ def get_device_proc_info(args): ...@@ -951,7 +951,7 @@ def get_device_proc_info(args):
if args.nproc_per_node is None: if args.nproc_per_node is None:
devices_per_proc = [0] devices_per_proc = [0]
else: else:
devices_per_proc = [x for x in range(0, args.nproc_per_node)] devices_per_proc = list(range(0, args.nproc_per_node))
else: else:
raise AssertionError( raise AssertionError(
"Can't support device_mode:{}, support only cpu|gpu|xpu now.".format( "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
...@@ -1107,20 +1107,14 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode): ...@@ -1107,20 +1107,14 @@ def get_mapped_cluster_from_args_without_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip) node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None: if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", "")) start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [ free_ports = list(
x range(start_port, start_port + len(node_ranks[node_rank]))
for x in range(
start_port, start_port + len(node_ranks[node_rank])
) )
]
elif os.environ.get('FLAGS_START_PORT') is not None: elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT')) start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [ free_ports = list(
x range(start_port, start_port + len(node_ranks[node_rank]))
for x in range(
start_port, start_port + len(node_ranks[node_rank])
) )
]
else: else:
free_ports = find_free_ports(len(node_ranks[node_rank])) free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports]) trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
...@@ -1250,20 +1244,14 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode): ...@@ -1250,20 +1244,14 @@ def get_mapped_cluster_from_args_with_rank_mapping(args, device_mode):
node_rank = node_ips.index(ip) node_rank = node_ips.index(ip)
if os.environ.get('PADDLE_PORT') is not None: if os.environ.get('PADDLE_PORT') is not None:
start_port = int(os.getenv("PADDLE_PORT", "")) start_port = int(os.getenv("PADDLE_PORT", ""))
free_ports = [ free_ports = list(
x range(start_port, start_port + len(node_ranks[node_rank]))
for x in range(
start_port, start_port + len(node_ranks[node_rank])
) )
]
elif os.environ.get('FLAGS_START_PORT') is not None: elif os.environ.get('FLAGS_START_PORT') is not None:
start_port = int(os.environ.get('FLAGS_START_PORT')) start_port = int(os.environ.get('FLAGS_START_PORT'))
free_ports = [ free_ports = list(
x range(start_port, start_port + len(node_ranks[node_rank]))
for x in range(
start_port, start_port + len(node_ranks[node_rank])
) )
]
else: else:
free_ports = find_free_ports(len(node_ranks[node_rank])) free_ports = find_free_ports(len(node_ranks[node_rank]))
trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports]) trainer_endpoints.append(["%s:%d" % (ip, port) for port in free_ports])
......
...@@ -196,7 +196,7 @@ class AscendIRParser: ...@@ -196,7 +196,7 @@ class AscendIRParser:
HcomGroupConfig( HcomGroupConfig(
name="hcom_group_0", name="hcom_group_0",
nranks=fleet.world_size(), nranks=fleet.world_size(),
rank_ids=[x for x in range(fleet.world_size())], rank_ids=list(range(fleet.world_size())),
) )
) )
......
...@@ -2422,7 +2422,7 @@ class TransposeGradParser(AscendParserBase): ...@@ -2422,7 +2422,7 @@ class TransposeGradParser(AscendParserBase):
x_shape = self.op.block.var(self.op.input_arg_names[1]).shape[1:] x_shape = self.op.block.var(self.op.input_arg_names[1]).shape[1:]
out_grad_shape = self.op.block.var(self.op.input_arg_names[0]).shape out_grad_shape = self.op.block.var(self.op.input_arg_names[0]).shape
assert list(map(lambda x: out_grad_shape[x], perm)) == list(x_shape) assert [out_grad_shape[x] for x in perm] == list(x_shape)
x_grad = ( x_grad = (
core.GEOperatorFactory.create_operator( core.GEOperatorFactory.create_operator(
......
...@@ -27,7 +27,7 @@ def _obtain_optimizer_parameters_list(optimizer): ...@@ -27,7 +27,7 @@ def _obtain_optimizer_parameters_list(optimizer):
for param in group['params']: for param in group['params']:
parameters_list.append(param) parameters_list.append(param)
else: else:
parameters_list = [param for param in optimizer._parameter_list] parameters_list = list(optimizer._parameter_list)
return parameters_list return parameters_list
......
...@@ -37,7 +37,7 @@ def _obtain_optimizer_parameters_list(optimizer): ...@@ -37,7 +37,7 @@ def _obtain_optimizer_parameters_list(optimizer):
for param in group['params']: for param in group['params']:
parameters_list.append(param) parameters_list.append(param)
else: else:
parameters_list = [param for param in optimizer._parameter_list] parameters_list = list(optimizer._parameter_list)
return parameters_list return parameters_list
......
...@@ -337,7 +337,7 @@ class PipelineParallel(MetaParallelBase): ...@@ -337,7 +337,7 @@ class PipelineParallel(MetaParallelBase):
assert len(outputs) == len(output_tensor_grad) assert len(outputs) == len(output_tensor_grad)
paddle.autograd.backward( paddle.autograd.backward(
tensors=outputs, tensors=outputs,
grad_tensors=[t for t in output_tensor_grad], grad_tensors=list(output_tensor_grad),
) )
else: else:
paddle.autograd.backward( paddle.autograd.backward(
......
...@@ -438,7 +438,7 @@ class GroupShardedOptimizerStage2(Optimizer): ...@@ -438,7 +438,7 @@ class GroupShardedOptimizerStage2(Optimizer):
if self.offload: if self.offload:
self._optim._master_weights = self._master_params self._optim._master_weights = self._master_params
cpu_master_params = [p for p in self._master_params.values()] cpu_master_params = list(self._master_params.values())
for param in cpu_master_params: for param in cpu_master_params:
size = param._numel() * align[Type.fp32.value] size = param._numel() * align[Type.fp32.value]
remaining = size % alignment[self.offload_device] remaining = size % alignment[self.offload_device]
......
...@@ -79,12 +79,10 @@ class GroupShardedStage2(nn.Layer): ...@@ -79,12 +79,10 @@ class GroupShardedStage2(nn.Layer):
else sharding_optimizer else sharding_optimizer
) )
assert all( assert all(
list( [
map( isinstance(opt, GroupShardedOptimizerStage2)
lambda opt: isinstance(opt, GroupShardedOptimizerStage2), for opt in self._sharding_optimizers
self._sharding_optimizers, ]
)
)
), "Please use GroupShardedOptimizerStage2 optimizer" ), "Please use GroupShardedOptimizerStage2 optimizer"
self._sync_buffers = sync_buffers self._sync_buffers = sync_buffers
self._auto_refresh_trainable = auto_refresh_trainable self._auto_refresh_trainable = auto_refresh_trainable
......
...@@ -335,7 +335,7 @@ class GroupShardedStage3(nn.Layer): ...@@ -335,7 +335,7 @@ class GroupShardedStage3(nn.Layer):
buffer_size[param.dtype] += param._numel() + p_align buffer_size[param.dtype] += param._numel() + p_align
# Create unslice_params'grad # Create unslice_params'grad
for param in sorted(list(self._unslice_params), key=lambda p: p.name): for param in sorted(self._unslice_params, key=lambda p: p.name):
if param.dtype not in self._grad_storages.keys(): if param.dtype not in self._grad_storages.keys():
self._grad_storages[param.dtype] = GradStorage( self._grad_storages[param.dtype] = GradStorage(
buffer_size[param.dtype], buffer_size[param.dtype],
......
...@@ -1445,7 +1445,7 @@ class TheOnePSRuntime(RuntimeBase): ...@@ -1445,7 +1445,7 @@ class TheOnePSRuntime(RuntimeBase):
generate_vars = self.context[ generate_vars = self.context[
"user_defined_strategy" "user_defined_strategy"
].trainer_desc_configs["stat_var_names"] ].trainer_desc_configs["stat_var_names"]
generate_vars = [var for var in generate_vars] generate_vars = list(generate_vars)
remaining_vars = list( remaining_vars = list(
filter( filter(
TheOnePSRuntime.__exclude_vars(sparse_names), TheOnePSRuntime.__exclude_vars(sparse_names),
......
...@@ -207,7 +207,7 @@ class ETCDMaster(Master): ...@@ -207,7 +207,7 @@ class ETCDMaster(Master):
while not self.ctx.status.is_done(): while not self.ctx.status.is_done():
self.client.put(path, value.encode('latin-1')) self.client.put(path, value.encode('latin-1'))
result = [i for i in self.client.get_prefix(prefix)] result = list(self.client.get_prefix(prefix))
result = copy.deepcopy(result) result = copy.deepcopy(result)
self.ctx.logger.debug("sync peers {}".format(result)) self.ctx.logger.debug("sync peers {}".format(result))
......
...@@ -47,8 +47,8 @@ class PSController(Controller): ...@@ -47,8 +47,8 @@ class PSController(Controller):
else: else:
host = self.ctx.node.ip host = self.ctx.node.ip
server_endpoints = [s for s in self.ctx.args.servers.split(",")] server_endpoints = list(self.ctx.args.servers.split(","))
trainer_endpoints = [s for s in self.ctx.args.trainers.split(",")] trainer_endpoints = list(self.ctx.args.trainers.split(","))
servers = [ servers = [
s for s in self.ctx.args.servers.split(",") if s.startswith(host) s for s in self.ctx.args.servers.split(",") if s.startswith(host)
] ]
......
...@@ -643,7 +643,7 @@ class DataParallelOptimizationPass(PassBase): ...@@ -643,7 +643,7 @@ class DataParallelOptimizationPass(PassBase):
) )
# insert dependency op # insert dependency op
indice = sorted(list(dep_map.keys()), reverse=True) indice = sorted(dep_map.keys(), reverse=True)
for i in indice: for i in indice:
for idx, prior_vars, post_vars, op_role in dep_map[i][::-1]: for idx, prior_vars, post_vars, op_role in dep_map[i][::-1]:
depend_op = insert_dependencies_for_vars( depend_op = insert_dependencies_for_vars(
......
...@@ -893,7 +893,7 @@ class ShardingPass(PassBase): ...@@ -893,7 +893,7 @@ class ShardingPass(PassBase):
) )
# insert deps # insert deps
indice = sorted(list(dep_map.keys()), reverse=True) indice = sorted(dep_map.keys(), reverse=True)
for i in indice: for i in indice:
for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]: for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
depend_op = insert_dependencies_for_vars( depend_op = insert_dependencies_for_vars(
...@@ -1263,7 +1263,7 @@ class ShardingPass(PassBase): ...@@ -1263,7 +1263,7 @@ class ShardingPass(PassBase):
idx += 1 idx += 1
# insert deps # insert deps
indice = sorted(list(dep_map.keys()), reverse=True) indice = sorted(dep_map.keys(), reverse=True)
for i in indice: for i in indice:
for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]: for idx, prior_vars, post_vars, comm_stream in dep_map[i][::-1]:
depend_op = insert_dependencies_for_vars( depend_op = insert_dependencies_for_vars(
......
...@@ -138,7 +138,7 @@ class AutoParalSupplementDepPass(PassBase): ...@@ -138,7 +138,7 @@ class AutoParalSupplementDepPass(PassBase):
prior_varname = op.output("ParamOut")[0] prior_varname = op.output("ParamOut")[0]
# insert deps # insert deps
indice = sorted(list(deps_map.keys()), reverse=True) indice = sorted(deps_map.keys(), reverse=True)
for idx in indice: for idx in indice:
prior_var = main_block.var(deps_map[idx][0]) prior_var = main_block.var(deps_map[idx][0])
post_var = main_block.var(deps_map[idx][1]) post_var = main_block.var(deps_map[idx][1])
......
...@@ -1579,7 +1579,7 @@ class TheOnePSRuntime(RuntimeBase): ...@@ -1579,7 +1579,7 @@ class TheOnePSRuntime(RuntimeBase):
generate_vars = self.context[ generate_vars = self.context[
"user_defined_strategy" "user_defined_strategy"
].trainer_desc_configs["stat_var_names"] ].trainer_desc_configs["stat_var_names"]
generate_vars = [var for var in generate_vars] generate_vars = list(generate_vars)
remaining_vars = list( remaining_vars = list(
filter( filter(
TheOnePSRuntime.__exclude_vars(sparse_names), TheOnePSRuntime.__exclude_vars(sparse_names),
......
...@@ -1698,7 +1698,7 @@ def add_send_op(program, block, _vars): ...@@ -1698,7 +1698,7 @@ def add_send_op(program, block, _vars):
def get_vars_name_in_block(block): def get_vars_name_in_block(block):
vars_list = block.vars.keys() vars_list = block.vars.keys()
vars_name_list = [var_name for var_name in vars_list] vars_name_list = list(vars_list)
return vars_name_list return vars_name_list
......
...@@ -331,7 +331,7 @@ def _get_subprocess_env_list(nprocs, options): ...@@ -331,7 +331,7 @@ def _get_subprocess_env_list(nprocs, options):
# get cluster and pod config # get cluster and pod config
if options['backend'] == 'gloo': if options['backend'] == 'gloo':
devices_per_proc = [x for x in range(0, nprocs)] devices_per_proc = list(range(0, nprocs))
cluster, pod = get_cluster_from_args( cluster, pod = get_cluster_from_args(
args, DeviceMode.CPU, devices_per_proc args, DeviceMode.CPU, devices_per_proc
) )
......
...@@ -54,9 +54,9 @@ def get_cluster_from_args(args, selected_gpus): ...@@ -54,9 +54,9 @@ def get_cluster_from_args(args, selected_gpus):
if args.started_port is not None: if args.started_port is not None:
started_port = args.started_port started_port = args.started_port
free_ports = [ free_ports = list(
x for x in range(started_port, started_port + len(selected_gpus)) range(started_port, started_port + len(selected_gpus))
] )
trainer_endpoints = [] trainer_endpoints = []
for ip in node_ips: for ip in node_ips:
......
...@@ -16,9 +16,8 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main ...@@ -16,9 +16,8 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle import paddle
import paddle.distributed as dist import paddle.distributed as dist
import paddle.fluid as fluid from paddle import fluid, framework
import paddle.fluid.data_feeder as data_feeder from paddle.fluid import data_feeder
import paddle.framework as framework
paddle.enable_static() paddle.enable_static()
......
...@@ -16,9 +16,8 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main ...@@ -16,9 +16,8 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main
import paddle import paddle
import paddle.distributed as dist import paddle.distributed as dist
import paddle.fluid as fluid from paddle import fluid, framework
import paddle.fluid.data_feeder as data_feeder from paddle.fluid import data_feeder
import paddle.framework as framework
paddle.enable_static() paddle.enable_static()
......
...@@ -72,7 +72,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase): ...@@ -72,7 +72,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch): def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch]) x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10)) x_data = x_data.reshape((-1, 10))
x = to_variable(x_data) x = to_variable(x_data)
out = model(x) out = model(x)
......
...@@ -60,7 +60,7 @@ class TestNoSync(TestParallelDyGraphRunnerBase): ...@@ -60,7 +60,7 @@ class TestNoSync(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch): def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch]) x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10)) x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out = model(x) out = model(x)
......
...@@ -55,7 +55,7 @@ class TestNoSyncControlFlow(TestNoSync): ...@@ -55,7 +55,7 @@ class TestNoSyncControlFlow(TestNoSync):
return model, train_reader, optimizer return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch): def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch]) x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10)) x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out = model(x) out = model(x)
......
...@@ -54,7 +54,7 @@ class TestNoSyncUnusedParam(TestNoSync): ...@@ -54,7 +54,7 @@ class TestNoSyncUnusedParam(TestNoSync):
return model, train_reader, optimizer return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch): def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch]) x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10)) x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out = model(x) out = model(x)
......
...@@ -282,7 +282,7 @@ class DataReader: ...@@ -282,7 +282,7 @@ class DataReader:
sent_ids = [0 for i in range(sent0_len)] + [ sent_ids = [0 for i in range(sent0_len)] + [
1 for i in range(sent1_len) 1 for i in range(sent1_len)
] ]
pos_ids = [i for i in range(sent0_len + sent1_len)] pos_ids = list(range(sent0_len + sent1_len))
label = 1 label = 1
yield token_ids, sent_ids, pos_ids, label yield token_ids, sent_ids, pos_ids, label
......
...@@ -185,9 +185,7 @@ class TestStaticAnalysis(unittest.TestCase): ...@@ -185,9 +185,7 @@ class TestStaticAnalysis(unittest.TestCase):
if wrapper.parent is not None: if wrapper.parent is not None:
self.assertTrue(wrapper in wrapper.parent.children) self.assertTrue(wrapper in wrapper.parent.children)
children_ast_nodes = [ children_ast_nodes = list(gast.iter_child_nodes(wrapper.node))
child for child in gast.iter_child_nodes(wrapper.node)
]
self.assertEqual(len(wrapper.children), len(children_ast_nodes)) self.assertEqual(len(wrapper.children), len(children_ast_nodes))
for child in wrapper.children: for child in wrapper.children:
self.assertTrue(child.node in children_ast_nodes) self.assertTrue(child.node in children_ast_nodes)
......
...@@ -55,8 +55,7 @@ class PrePostProcessLayer(Layer): ...@@ -55,8 +55,7 @@ class PrePostProcessLayer(Layer):
elif cmd == "n": # add layer normalization elif cmd == "n": # add layer normalization
self.functors.append( self.functors.append(
self.add_sublayer( self.add_sublayer(
"layer_norm_%d" "layer_norm_%d" % len(list(self.children())),
% len([layer for layer in self.children()]),
paddle.nn.LayerNorm( paddle.nn.LayerNorm(
normalized_shape=d_model, normalized_shape=d_model,
weight_attr=fluid.ParamAttr( weight_attr=fluid.ParamAttr(
......
...@@ -90,7 +90,7 @@ class TestFcFusePass(PassAutoScanTest): ...@@ -90,7 +90,7 @@ class TestFcFusePass(PassAutoScanTest):
if begin_norm_axis == x_shape_rank - 1 and draw(st.booleans()): if begin_norm_axis == x_shape_rank - 1 and draw(st.booleans()):
reduce_mean_dim = [-1] reduce_mean_dim = [-1]
else: else:
reduce_mean_dim = [i for i in range(x_shape_rank)] reduce_mean_dim = list(range(x_shape_rank))
reduce_mean_dim = reduce_mean_dim[begin_norm_axis:] reduce_mean_dim = reduce_mean_dim[begin_norm_axis:]
error_test_ratio = draw(st.integers(min_value=1, max_value=10)) error_test_ratio = draw(st.integers(min_value=1, max_value=10))
if error_test_ratio > 9: if error_test_ratio > 9:
......
...@@ -47,7 +47,7 @@ class TestOneDNNReshapeTransposeMatmulFusePass(PassAutoScanTest): ...@@ -47,7 +47,7 @@ class TestOneDNNReshapeTransposeMatmulFusePass(PassAutoScanTest):
def generate_input2(attrs): def generate_input2(attrs):
shape_x = [attrs[3]['batch_size'], attrs[3]['channel'], self.num] shape_x = [attrs[3]['batch_size'], attrs[3]['channel'], self.num]
input_volume = reduce(lambda x, y: x * y, shape_x) input_volume = reduce(lambda x, y: x * y, shape_x)
matmul_shape = [i for i in attrs[0]['shape']] matmul_shape = list(attrs[0]['shape'])
if 0 in matmul_shape: if 0 in matmul_shape:
for i in range(len(matmul_shape)): for i in range(len(matmul_shape)):
if matmul_shape[i] == 0: if matmul_shape[i] == 0:
......
...@@ -89,7 +89,7 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest): ...@@ -89,7 +89,7 @@ class TestTransposeFlattenConcatFusePass(PassAutoScanTest):
inputs = {} inputs = {}
x_shape_rank = draw(st.integers(min_value=2, max_value=5)) x_shape_rank = draw(st.integers(min_value=2, max_value=5))
# Generate axis of transpose # Generate axis of transpose
trans_axis = [j for j in range(x_shape_rank)] trans_axis = list(range(x_shape_rank))
for j in range(x_shape_rank - 1): for j in range(x_shape_rank - 1):
if draw(st.booleans()): if draw(st.booleans()):
trans_axis[j], trans_axis[-1] = trans_axis[-1], trans_axis[j] trans_axis[j], trans_axis[-1] = trans_axis[-1], trans_axis[j]
......
...@@ -48,14 +48,14 @@ def reference_matmul(X, Y, transpose_x=False, transpose_y=False): ...@@ -48,14 +48,14 @@ def reference_matmul(X, Y, transpose_x=False, transpose_y=False):
elif X.ndim == 2: elif X.ndim == 2:
X = X.T X = X.T
else: else:
dim = [i for i in range(len(X.shape))] dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1] dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim)) X = np.transpose(X, tuple(dim))
if transpose_y: if transpose_y:
if Y.ndim == 1: if Y.ndim == 1:
Y = Y.reshape((Y.size,)) Y = Y.reshape((Y.size,))
else: else:
dim = [i for i in range(len(Y.shape))] dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim)) Y = np.transpose(Y, tuple(dim))
......
...@@ -64,7 +64,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase): ...@@ -64,7 +64,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch): def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch]) x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10)) x_data = x_data.reshape((-1, 10))
x = paddle.to_tensor(x_data) x = paddle.to_tensor(x_data)
out = model(x) out = model(x)
......
...@@ -60,7 +60,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase): ...@@ -60,7 +60,7 @@ class TestSimpleNet(TestParallelDyGraphRunnerBase):
return model, train_reader, optimizer return model, train_reader, optimizer
def run_one_loop(self, model, optimizer, batch): def run_one_loop(self, model, optimizer, batch):
x_data = np.array([x for x in batch]) x_data = np.array(list(batch))
x_data = x_data.reshape((-1, 10)) x_data = x_data.reshape((-1, 10))
x = to_variable(x_data) x = to_variable(x_data)
out = model(x) out = model(x)
......
...@@ -104,7 +104,7 @@ class TestAllocContinuousSpace(OpTest): ...@@ -104,7 +104,7 @@ class TestAllocContinuousSpace(OpTest):
out[0:length] = input[1].flatten() out[0:length] = input[1].flatten()
inputs.append(out) inputs.append(out)
coalesce_tensor_var = np.concatenate([input for input in inputs]) coalesce_tensor_var = np.concatenate(list(inputs))
if set_constant: if set_constant:
coalesce_tensor_var = np.ones(len(coalesce_tensor_var)) * constant coalesce_tensor_var = np.ones(len(coalesce_tensor_var)) * constant
outputs = [ outputs = [
......
...@@ -65,7 +65,7 @@ def create_pyobject_test_data(shape=None, seed=None): ...@@ -65,7 +65,7 @@ def create_pyobject_test_data(shape=None, seed=None):
np.random.seed(seed) np.random.seed(seed)
list_shape = np.random.randint(0, high=100, size=(2)).tolist() list_shape = np.random.randint(0, high=100, size=(2)).tolist()
list_data = np.random.random(shape).tolist() list_data = np.random.random(shape).tolist()
dict_key = [i for i in range(0, shape[0])] dict_key = list(range(0, shape[0]))
dict_val = np.random.random(shape).tolist() dict_val = np.random.random(shape).tolist()
dict_data = dict(zip(dict_key, dict_val)) dict_data = dict(zip(dict_key, dict_val))
return [list_data, dict_data] return [list_data, dict_data]
......
...@@ -796,12 +796,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): ...@@ -796,12 +796,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
['Filter'], ['Filter'],
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
no_grad_set=set(['Input']), no_grad_set={'Input'},
) )
else: else:
self.check_grad( self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
['Filter'], 'Output', no_grad_set=set(['Input'])
)
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
if self.need_check_grad: if self.need_check_grad:
...@@ -813,12 +811,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): ...@@ -813,12 +811,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
['Input'], ['Input'],
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
no_grad_set=set(['Filter']), no_grad_set={'Filter'},
) )
else: else:
self.check_grad( self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})
['Input'], 'Output', no_grad_set=set(['Filter'])
)
def test_check_grad(self): def test_check_grad(self):
if self.need_check_grad: if self.need_check_grad:
...@@ -827,13 +823,13 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): ...@@ -827,13 +823,13 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_grad_with_place( self.check_grad_with_place(
place, place,
set(['Input', 'Filter']), {'Input', 'Filter'},
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
) )
else: else:
self.check_grad( self.check_grad(
set(['Input', 'Filter']), 'Output', max_relative_error=0.02 {'Input', 'Filter'}, 'Output', max_relative_error=0.02
) )
...@@ -980,7 +976,7 @@ class TestCUDNN_BF16(TestConv2DTransposeOp): ...@@ -980,7 +976,7 @@ class TestCUDNN_BF16(TestConv2DTransposeOp):
['Filter'], ['Filter'],
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
no_grad_set=set(['Input']), no_grad_set={'Input'},
user_defined_grads=[numeric_grads], user_defined_grads=[numeric_grads],
) )
...@@ -992,7 +988,7 @@ class TestCUDNN_BF16(TestConv2DTransposeOp): ...@@ -992,7 +988,7 @@ class TestCUDNN_BF16(TestConv2DTransposeOp):
['Input'], ['Input'],
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
no_grad_set=set(['Filter']), no_grad_set={'Filter'},
user_defined_grads=[numeric_grads], user_defined_grads=[numeric_grads],
) )
......
...@@ -37,7 +37,7 @@ class TestCreateParameterError(unittest.TestCase): ...@@ -37,7 +37,7 @@ class TestCreateParameterError(unittest.TestCase):
def test_attr(): def test_attr():
paddle.create_parameter( paddle.create_parameter(
[1, 2, 3], np.float32, attr=np.array([i for i in range(6)]) [1, 2, 3], np.float32, attr=np.array(list(range(6)))
) )
self.assertRaises(TypeError, test_attr) self.assertRaises(TypeError, test_attr)
...@@ -46,7 +46,7 @@ class TestCreateParameterError(unittest.TestCase): ...@@ -46,7 +46,7 @@ class TestCreateParameterError(unittest.TestCase):
paddle.create_parameter( paddle.create_parameter(
[1, 2, 3], [1, 2, 3],
np.float32, np.float32,
default_initializer=np.array([i for i in range(6)]), default_initializer=np.array(list(range(6))),
) )
self.assertRaises(TypeError, test_default_initializer) self.assertRaises(TypeError, test_default_initializer)
......
...@@ -69,7 +69,7 @@ class TestCropTensorOp(OpTest): ...@@ -69,7 +69,7 @@ class TestCropTensorOp(OpTest):
else: else:
self.attrs['offsets'] = self.offsets self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape] crop_shape = list(self.crop_shape)
for i in range(len(self.crop_shape)): for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1: if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i] crop_shape[i] = self.x_shape[i] - self.offsets[i]
...@@ -169,7 +169,7 @@ class TestCropTensorOpTensorAttr(OpTest): ...@@ -169,7 +169,7 @@ class TestCropTensorOpTensorAttr(OpTest):
self.attrs['shape'] = self.crop_shape self.attrs['shape'] = self.crop_shape
self.attrs['offsets'] = self.offsets self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape] crop_shape = list(self.crop_shape)
for i in range(len(self.crop_shape)): for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1: if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i] crop_shape[i] = self.x_shape[i] - self.offsets[i]
......
...@@ -121,8 +121,8 @@ def operator_equal(a, b): ...@@ -121,8 +121,8 @@ def operator_equal(a, b):
continue continue
elif isinstance(v, collections.OrderedDict): elif isinstance(v, collections.OrderedDict):
v0 = sorted(list(v.items()), key=lambda x: x[0]) v0 = sorted(v.items(), key=lambda x: x[0])
v1 = sorted(list(b.__dict__[k].items()), key=lambda x: x[0]) v1 = sorted(b.__dict__[k].items(), key=lambda x: x[0])
if v0 != v1: if v0 != v1:
raise ValueError("In operator_equal not equal:{0}\n".format(k)) raise ValueError("In operator_equal not equal:{0}\n".format(k))
......
...@@ -660,7 +660,7 @@ class TestParallelDyGraphRunnerBase: ...@@ -660,7 +660,7 @@ class TestParallelDyGraphRunnerBase:
if paddle.distributed.get_rank() == 0: if paddle.distributed.get_rank() == 0:
new_batch.append(batch[0]) new_batch.append(batch[0])
elif paddle.distributed.get_rank() == 1: elif paddle.distributed.get_rank() == 1:
new_batch.extend([_ for _ in batch[1:]]) new_batch.extend(list(batch[1:]))
else: else:
raise NotImplementedError( raise NotImplementedError(
"Current TestParallelDyGraphRunnerBase don't support world_size > 2" "Current TestParallelDyGraphRunnerBase don't support world_size > 2"
......
...@@ -34,7 +34,7 @@ class TestLookupTableFuseOp(unittest.TestCase): ...@@ -34,7 +34,7 @@ class TestLookupTableFuseOp(unittest.TestCase):
def check_with_place(self, place): def check_with_place(self, place):
scope = fluid.global_scope() scope = fluid.global_scope()
scope.var("LearningRate").get_tensor().set([0.01], place) scope.var("LearningRate").get_tensor().set([0.01], place)
scope.var("Ids").get_tensor().set([i for i in range(100)], place) scope.var("Ids").get_tensor().set(list(range(100)), place)
init_program = fluid.Program() init_program = fluid.Program()
......
...@@ -40,7 +40,7 @@ def gru( ...@@ -40,7 +40,7 @@ def gru(
for i in range(len(seq_lens)): for i in range(len(seq_lens)):
seq_starts.append(seq_starts[-1] + seq_lens[i]) seq_starts.append(seq_starts[-1] + seq_lens[i])
sorted_seqs = sorted( sorted_seqs = sorted(
list(range(len(seq_lens))), range(len(seq_lens)),
key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]), key=functools.cmp_to_key(lambda x, y: seq_lens[y] - seq_lens[x]),
) )
num_batch = seq_lens[sorted_seqs[0]] num_batch = seq_lens[sorted_seqs[0]]
......
...@@ -58,7 +58,7 @@ class TestKLDivLossOp(OpTest): ...@@ -58,7 +58,7 @@ class TestKLDivLossOp(OpTest):
self.check_output() self.check_output()
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"])) self.check_grad(['X'], 'Loss', no_grad_set={"Target"})
def initTestCase(self): def initTestCase(self):
self.x_shape = (4, 5, 5) self.x_shape = (4, 5, 5)
......
...@@ -58,7 +58,7 @@ class TestKthvalueOp(OpTest): ...@@ -58,7 +58,7 @@ class TestKthvalueOp(OpTest):
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out') self.check_grad({'X'}, 'Out')
class TestKthvalueOpWithKeepdim(OpTest): class TestKthvalueOpWithKeepdim(OpTest):
...@@ -85,7 +85,7 @@ class TestKthvalueOpWithKeepdim(OpTest): ...@@ -85,7 +85,7 @@ class TestKthvalueOpWithKeepdim(OpTest):
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out') self.check_grad({'X'}, 'Out')
class TestKthvalueOpKernels(unittest.TestCase): class TestKthvalueOpKernels(unittest.TestCase):
......
...@@ -115,7 +115,7 @@ class TestLodAppendOpByAttr(OpTest): ...@@ -115,7 +115,7 @@ class TestLodAppendOpByAttr(OpTest):
lod = [[3, 2, 5]] lod = [[3, 2, 5]]
# target_offset_lod and target_lod are the same lod info represented # target_offset_lod and target_lod are the same lod info represented
# in offset-based format and length-based format, respectively. # in offset-based format and length-based format, respectively.
target_offset_lod = [i for i in range(11)] target_offset_lod = list(range(11))
self.inputs = {'X': (x, lod)} self.inputs = {'X': (x, lod)}
out_lod = [[3, 2, 5], [1] * 10] out_lod = [[3, 2, 5], [1] * 10]
# The `target_lod` attribute is still based on offset # The `target_lod` attribute is still based on offset
......
...@@ -128,7 +128,7 @@ class TestLookupTableBF16OpWIsSelectedRows(unittest.TestCase): ...@@ -128,7 +128,7 @@ class TestLookupTableBF16OpWIsSelectedRows(unittest.TestCase):
self.place = core.CPUPlace() self.place = core.CPUPlace()
def prepare_w(self): def prepare_w(self):
rows = [a for a in range(self.w_bf16.shape[0])] rows = list(range(self.w_bf16.shape[0]))
row_numel = self.w_bf16.shape[1] row_numel = self.w_bf16.shape[1]
w_selected_rows = self.scope.var('W').get_selected_rows() w_selected_rows = self.scope.var('W').get_selected_rows()
......
...@@ -65,14 +65,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): ...@@ -65,14 +65,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2: elif X.ndim == 2:
X = X.T X = X.T
else: else:
dim = [i for i in range(len(X.shape))] dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1] dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim)) X = np.transpose(X, tuple(dim))
if transpose_Y: if transpose_Y:
if Y.ndim == 1: if Y.ndim == 1:
Y = Y.reshape((1, Y.size)) Y = Y.reshape((1, Y.size))
else: else:
dim = [i for i in range(len(Y.shape))] dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim)) Y = np.transpose(Y, tuple(dim))
......
...@@ -33,14 +33,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): ...@@ -33,14 +33,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2: elif X.ndim == 2:
X = X.T X = X.T
else: else:
dim = [i for i in range(len(X.shape))] dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1] dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim)) X = np.transpose(X, tuple(dim))
if transpose_Y: if transpose_Y:
if Y.ndim == 1: if Y.ndim == 1:
Y = Y.reshape((Y.size,)) Y = Y.reshape((Y.size,))
else: else:
dim = [i for i in range(len(Y.shape))] dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim)) Y = np.transpose(Y, tuple(dim))
......
...@@ -78,7 +78,7 @@ class TestModeOp(OpTest): ...@@ -78,7 +78,7 @@ class TestModeOp(OpTest):
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out') self.check_grad({'X'}, 'Out')
class TestModeOpLastdim(OpTest): class TestModeOpLastdim(OpTest):
...@@ -103,7 +103,7 @@ class TestModeOpLastdim(OpTest): ...@@ -103,7 +103,7 @@ class TestModeOpLastdim(OpTest):
def test_check_grad(self): def test_check_grad(self):
paddle.enable_static() paddle.enable_static()
self.check_grad(set(['X']), 'Out') self.check_grad({'X'}, 'Out')
class TestModeOpKernels(unittest.TestCase): class TestModeOpKernels(unittest.TestCase):
......
...@@ -86,9 +86,7 @@ class TestRegisteredPhiKernels(unittest.TestCase): ...@@ -86,9 +86,7 @@ class TestRegisteredPhiKernels(unittest.TestCase):
def test_registered_phi_kernels(self): def test_registered_phi_kernels(self):
phi_function_kernel_infos = core._get_registered_phi_kernels("function") phi_function_kernel_infos = core._get_registered_phi_kernels("function")
registered_kernel_list = [ registered_kernel_list = list(phi_function_kernel_infos.keys())
name for name in phi_function_kernel_infos.keys()
]
forward_kernels = get_all_kernels( forward_kernels = get_all_kernels(
self.forward_ops, registered_kernel_list self.forward_ops, registered_kernel_list
) )
......
...@@ -193,7 +193,7 @@ class TestSparseGradParamSGDOpBF16(TestSparseSGDOpBF16): ...@@ -193,7 +193,7 @@ class TestSparseGradParamSGDOpBF16(TestSparseSGDOpBF16):
self.grad_height = 10 self.grad_height = 10
self.grad_rows = [0, 4, 7] self.grad_rows = [0, 4, 7]
self.grad_row_numel = 12 self.grad_row_numel = 12
self.param_rows = [a for a in range(self.grad_height)] self.param_rows = list(range(self.grad_height))
def test_sparse_param_grad_sgd(self): def test_sparse_param_grad_sgd(self):
scope = core.Scope() scope = core.Scope()
...@@ -228,7 +228,7 @@ class TestSparseGradParamSGDOpBF16Case2(TestSparseGradParamSGDOpBF16): ...@@ -228,7 +228,7 @@ class TestSparseGradParamSGDOpBF16Case2(TestSparseGradParamSGDOpBF16):
self.grad_height = 14 self.grad_height = 14
self.grad_rows = [1, 4, 12, 7, 8] self.grad_rows = [1, 4, 12, 7, 8]
self.grad_row_numel = 16 self.grad_row_numel = 16
self.param_rows = [a for a in range(self.grad_height)] self.param_rows = list(range(self.grad_height))
@OpTestTool.skip_if_not_cpu_bf16() @OpTestTool.skip_if_not_cpu_bf16()
......
...@@ -23,7 +23,7 @@ import paddle ...@@ -23,7 +23,7 @@ import paddle
def common_setup(self, index_num, nshards, shard_id, ignore_value): def common_setup(self, index_num, nshards, shard_id, ignore_value):
self.op_type = 'shard_index' self.op_type = 'shard_index'
self.python_api = paddle.tensor.shard_index self.python_api = paddle.tensor.shard_index
x_lod = [[i for i in range(10)]] x_lod = [list(range(10))]
N = sum(x_lod[0]) N = sum(x_lod[0])
x = [np.random.randint(0, index_num - 1) for i in range(N)] x = [np.random.randint(0, index_num - 1) for i in range(N)]
x = np.array(x).astype('int32').reshape([N, 1]) x = np.array(x).astype('int32').reshape([N, 1])
......
...@@ -102,7 +102,7 @@ class TestSmoothL1LossOp2(OpTest): ...@@ -102,7 +102,7 @@ class TestSmoothL1LossOp2(OpTest):
['Y'], ['Y'],
'Out', 'Out',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']), no_grad_set={'X', 'InsideWeight', 'OutsideWeight'},
) )
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
...@@ -110,7 +110,7 @@ class TestSmoothL1LossOp2(OpTest): ...@@ -110,7 +110,7 @@ class TestSmoothL1LossOp2(OpTest):
['X'], ['X'],
'Out', 'Out',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']), no_grad_set={'Y', 'InsideWeight', 'OutsideWeight'},
) )
......
...@@ -274,16 +274,14 @@ class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp): ...@@ -274,16 +274,14 @@ class TestLoDTensorAndSelectedRowsOp(TestSelectedRowsSumOp):
self.assertEqual(out_t.shape[0], self.height) self.assertEqual(out_t.shape[0], self.height)
np.testing.assert_array_equal( np.testing.assert_array_equal(
out_t, out_t,
self._get_array([i for i in range(self.height)], self.row_numel) self._get_array(list(range(self.height)), self.row_numel)
* np.tile(np.array(result).reshape(self.height, 1), self.row_numel), * np.tile(np.array(result).reshape(self.height, 1), self.row_numel),
) )
def create_lod_tensor(self, scope, place, var_name): def create_lod_tensor(self, scope, place, var_name):
var = scope.var(var_name) var = scope.var(var_name)
w_tensor = var.get_tensor() w_tensor = var.get_tensor()
w_array = self._get_array( w_array = self._get_array(list(range(self.height)), self.row_numel)
[i for i in range(self.height)], self.row_numel
)
w_tensor.set(w_array, place) w_tensor.set(w_array, place)
return var return var
......
...@@ -31,10 +31,10 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod): ...@@ -31,10 +31,10 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
offset = 0 offset = 0
for n in range(batch_size): for n in range(batch_size):
gt_num = gt_lod[n] gt_num = gt_lod[n]
ids = random.sample([i for i in range(num_prior)], gt_num) ids = random.sample(list(range(num_prior)), gt_num)
match_indices[n, ids] = [i for i in range(gt_num)] match_indices[n, ids] = list(range(gt_num))
ret_ids = {i for i in range(num_prior)} - set(ids) ret_ids = set(range(num_prior)) - set(ids)
l = neg_lod[n] l = neg_lod[n]
neg_ids = random.sample(ret_ids, l) neg_ids = random.sample(ret_ids, l)
neg_indices[offset : offset + neg_lod[n], :] = ( neg_indices[offset : offset + neg_lod[n], :] = (
......
...@@ -122,14 +122,10 @@ cases = { ...@@ -122,14 +122,10 @@ cases = {
for _op_type in ['tril', 'triu']: for _op_type in ['tril', 'triu']:
for _expected, _params in cases.items(): for _expected, _params in cases.items():
for _Xshape, _diaglist in _params.items(): for _Xshape, _diaglist in _params.items():
list( [
map( case_generator(_op_type, _Xshape, _diagonal, _expected)
lambda _diagonal: case_generator( for _diagonal in _diaglist
_op_type, _Xshape, _diagonal, _expected ]
),
_diaglist,
)
)
class TestTrilTriuOpAPI(unittest.TestCase): class TestTrilTriuOpAPI(unittest.TestCase):
......
...@@ -483,14 +483,10 @@ class BertTokenizer(PretrainedTokenizer): ...@@ -483,14 +483,10 @@ class BertTokenizer(PretrainedTokenizer):
"You should not supply a second sequence if the provided sequence of " "You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." "ids is already formatted with special tokens for the model."
) )
return list( return [
map( 1 if x in [self.sep_token_id, self.cls_token_id] else 0
lambda x: 1 for x in token_ids_0
if x in [self.sep_token_id, self.cls_token_id] ]
else 0,
token_ids_0,
)
)
if token_ids_1 is not None: if token_ids_1 is not None:
return ( return (
......
...@@ -93,7 +93,7 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper): ...@@ -93,7 +93,7 @@ class XPUTestCoalesceTensorOp(XPUOpTestWrapper):
out[0:length] = input[1].flatten() out[0:length] = input[1].flatten()
inputs.append(out) inputs.append(out)
coalesce_tensor_var = np.concatenate([input for input in inputs]) coalesce_tensor_var = np.concatenate(list(inputs))
if set_constant: if set_constant:
coalesce_tensor_var = ( coalesce_tensor_var = (
np.ones(len(coalesce_tensor_var)) * constant np.ones(len(coalesce_tensor_var)) * constant
......
...@@ -39,7 +39,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): ...@@ -39,7 +39,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2: elif X.ndim == 2:
X = X.T X = X.T
else: else:
dim = [i for i in range(len(X.shape))] dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1] dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim)) X = np.transpose(X, tuple(dim))
if transpose_Y: if transpose_Y:
...@@ -48,7 +48,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): ...@@ -48,7 +48,7 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif Y.ndim == 2: elif Y.ndim == 2:
Y = Y.T Y = Y.T
else: else:
dim = [i for i in range(len(Y.shape))] dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim)) Y = np.transpose(Y, tuple(dim))
......
...@@ -38,14 +38,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): ...@@ -38,14 +38,14 @@ def reference_matmul(X, Y, transpose_X=False, transpose_Y=False):
elif X.ndim == 2: elif X.ndim == 2:
X = X.T X = X.T
else: else:
dim = [i for i in range(len(X.shape))] dim = list(range(len(X.shape)))
dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1] dim[-1], dim[len(X.shape) - 2] = dim[len(X.shape) - 2], dim[-1]
X = np.transpose(X, tuple(dim)) X = np.transpose(X, tuple(dim))
if transpose_Y: if transpose_Y:
if Y.ndim == 1: if Y.ndim == 1:
Y = Y.reshape((Y.size,)) Y = Y.reshape((Y.size,))
else: else:
dim = [i for i in range(len(Y.shape))] dim = list(range(len(Y.shape)))
dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1] dim[-1], dim[len(Y.shape) - 2] = dim[len(Y.shape) - 2], dim[-1]
Y = np.transpose(Y, tuple(dim)) Y = np.transpose(Y, tuple(dim))
Out = np.matmul(X, Y) Out = np.matmul(X, Y)
......
...@@ -250,7 +250,7 @@ class TestLoDTensorAndSelectedRowsOp(unittest.TestCase): ...@@ -250,7 +250,7 @@ class TestLoDTensorAndSelectedRowsOp(unittest.TestCase):
self.assertEqual(out_t.shape[0], self.height) self.assertEqual(out_t.shape[0], self.height)
np.testing.assert_array_equal( np.testing.assert_array_equal(
out_t, out_t,
self._get_array([i for i in range(self.height)], self.row_numel) self._get_array(list(range(self.height)), self.row_numel)
* np.tile(np.array(result).reshape(self.height, 1), self.row_numel), * np.tile(np.array(result).reshape(self.height, 1), self.row_numel),
) )
...@@ -281,9 +281,7 @@ class TestLoDTensorAndSelectedRowsOp(unittest.TestCase): ...@@ -281,9 +281,7 @@ class TestLoDTensorAndSelectedRowsOp(unittest.TestCase):
return var return var
def create_lod_tensor(self, place): def create_lod_tensor(self, place):
w_array = self._get_array( w_array = self._get_array(list(range(self.height)), self.row_numel)
[i for i in range(self.height)], self.row_numel
)
return paddle.to_tensor(w_array) return paddle.to_tensor(w_array)
def test_w_is_selected_rows(self): def test_w_is_selected_rows(self):
......
...@@ -71,7 +71,7 @@ def config_callbacks( ...@@ -71,7 +71,7 @@ def config_callbacks(
class CallbackList: class CallbackList:
def __init__(self, callbacks=None): def __init__(self, callbacks=None):
# copy # copy
self.callbacks = [c for c in callbacks] self.callbacks = list(callbacks)
self.params = {} self.params = {}
self.model = None self.model = None
......
...@@ -315,7 +315,7 @@ def rsqrt_orig2prim(op, x): ...@@ -315,7 +315,7 @@ def rsqrt_orig2prim(op, x):
@REGISTER_ORIG2PRIM('matmul_v2') @REGISTER_ORIG2PRIM('matmul_v2')
def matmul_v2_orig2prim(op, x, y): def matmul_v2_orig2prim(op, x, y):
def trans(shape): def trans(shape):
ret = [i for i in range(len(shape))] ret = list(range(len(shape)))
ret[-1], ret[-2] = ret[-2], ret[-1] ret[-1], ret[-2] = ret[-2], ret[-1]
return ret return ret
......
...@@ -178,9 +178,7 @@ def _append_pserver_ops( ...@@ -178,9 +178,7 @@ def _append_pserver_ops(
merged_vars = [] merged_vars = []
merged_ordervars = [] merged_ordervars = []
param_vars = [ param_vars = list(config.param_grad_ep_mapping[endpoint]["params"])
p for p in config.param_grad_ep_mapping[endpoint]["params"]
]
for var in param_vars: for var in param_vars:
name = var.name name = var.name
...@@ -1066,7 +1064,7 @@ def build_pserver_startup_program_pass(program, p_main_program, config): ...@@ -1066,7 +1064,7 @@ def build_pserver_startup_program_pass(program, p_main_program, config):
def add_geo_optimizer_pass(program, config): def add_geo_optimizer_pass(program, config):
endpoint = config.get_ps_endpoint() endpoint = config.get_ps_endpoint()
params = [p for p in config.param_grad_ep_mapping[endpoint]["params"]] params = list(config.param_grad_ep_mapping[endpoint]["params"])
sparse_tablenames = get_sparse_tablenames( sparse_tablenames = get_sparse_tablenames(
config.get_origin_main_program(), False config.get_origin_main_program(), False
......
...@@ -2087,7 +2087,7 @@ def find_op_input_output(program, block, op): ...@@ -2087,7 +2087,7 @@ def find_op_input_output(program, block, op):
def get_vars_name_in_block(block): def get_vars_name_in_block(block):
vars_list = block.vars.keys() vars_list = block.vars.keys()
vars_name_list = [var_name for var_name in vars_list] vars_name_list = list(vars_list)
return vars_name_list return vars_name_list
......
...@@ -542,7 +542,7 @@ class PSLib(Fleet): ...@@ -542,7 +542,7 @@ class PSLib(Fleet):
for i in tp.dense_table: for i in tp.dense_table:
if table_id is not None and table_id != i.table_id: if table_id is not None and table_id != i.table_id:
continue continue
var_list = [var for var in i.dense_variable_name] var_list = list(i.dense_variable_name)
skip = False skip = False
for var in var_list: for var in var_list:
if scope.find_var(var) is None: if scope.find_var(var) is None:
...@@ -751,7 +751,7 @@ class PSLib(Fleet): ...@@ -751,7 +751,7 @@ class PSLib(Fleet):
for i in tp.dense_table: for i in tp.dense_table:
if table_id is not None and table_id != i.table_id: if table_id is not None and table_id != i.table_id:
continue continue
table_var_names = [var for var in i.dense_variable_name] table_var_names = list(i.dense_variable_name)
skip = False skip = False
for var in table_var_names: for var in table_var_names:
if scope.find_var(var) is None: if scope.find_var(var) is None:
......
...@@ -744,8 +744,8 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -744,8 +744,8 @@ class DistributedAdam(DistributedOptimizerImplBase):
] ]
program_configs[program_id] = { program_configs[program_id] = {
"pull_sparse": [t_index for t_index in sparse_table_index], "pull_sparse": list(sparse_table_index),
"push_sparse": [t_index for t_index in sparse_table_index], "push_sparse": list(sparse_table_index),
} }
params_grads = prog_id_to_param_grads[program_id] params_grads = prog_id_to_param_grads[program_id]
......
...@@ -941,7 +941,7 @@ class GeneralRoleMaker(RoleMakerBase): ...@@ -941,7 +941,7 @@ class GeneralRoleMaker(RoleMakerBase):
""" """
if not self._role_is_generated: if not self._role_is_generated:
self.generate_role() self.generate_role()
input_list = [i for i in input] input_list = list(input)
ans = self._node_type_comm.all_reduce(input_list, mode) ans = self._node_type_comm.all_reduce(input_list, mode)
for i in range(len(ans)): for i in range(len(ans)):
output[i] = ans[i] output[i] = ans[i]
......
...@@ -96,7 +96,7 @@ def check_pruned_program_vars(train_prog, pruned_prog): ...@@ -96,7 +96,7 @@ def check_pruned_program_vars(train_prog, pruned_prog):
if io_utils.is_persistable(v) if io_utils.is_persistable(v)
] ]
pruned_vars = OrderedDict(pruned_vars) pruned_vars = OrderedDict(pruned_vars)
pruned_vars_name = [name for name in pruned_vars] pruned_vars_name = list(pruned_vars)
logger.info( logger.info(
"persistable vars in pruned program: {}".format(pruned_vars_name) "persistable vars in pruned program: {}".format(pruned_vars_name)
) )
...@@ -497,7 +497,7 @@ def parse_program(program, output_dir): ...@@ -497,7 +497,7 @@ def parse_program(program, output_dir):
f.write("\n") f.write("\n")
# all vars # all vars
all_vars = [v for v in program.list_vars()] all_vars = list(program.list_vars())
output["all_vars"] = [ output["all_vars"] = [
{ {
'name': str(v.name), 'name': str(v.name),
......
...@@ -55,7 +55,7 @@ def indexable(x, code=None): ...@@ -55,7 +55,7 @@ def indexable(x, code=None):
if isinstance(x, Variable): if isinstance(x, Variable):
return x return x
elif hasattr(x, '__iter__'): elif hasattr(x, '__iter__'):
return [i for i in x] return list(x)
elif hasattr(x, '__len__') and hasattr( elif hasattr(x, '__len__') and hasattr(
x, '__getitem__' x, '__getitem__'
): # used for customed type and non-iterable type. ): # used for customed type and non-iterable type.
...@@ -575,14 +575,14 @@ class VariableTuple: ...@@ -575,14 +575,14 @@ class VariableTuple:
def convert_enumerate(*args): def convert_enumerate(*args):
has_variable = any(map(lambda x: isinstance(x, Variable), args)) has_variable = any(isinstance(x, Variable) for x in args)
if has_variable: if has_variable:
return VariableTuple(*args) return VariableTuple(*args)
return enumerate(*args) return enumerate(*args)
def convert_range(*args): def convert_range(*args):
has_variable = any(map(lambda x: isinstance(x, Variable), args)) has_variable = any(isinstance(x, Variable) for x in args)
if has_variable: if has_variable:
if len(args) == 1: if len(args) == 1:
return paddle.arange(0, args[0], 1, paddle.int64) return paddle.arange(0, args[0], 1, paddle.int64)
......
...@@ -1279,7 +1279,7 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor): ...@@ -1279,7 +1279,7 @@ class FunctionNameLivenessAnalysis(gast.NodeVisitor):
assert isinstance( assert isinstance(
node, gast.FunctionDef node, gast.FunctionDef
), "Input node is not function define node" ), "Input node is not function define node"
names = [a for a in node.args.args] names = list(node.args.args)
names.append(node.args.vararg) names.append(node.args.vararg)
names.append(node.args.kwarg) names.append(node.args.kwarg)
names = [i.id for i in names if i is not None] names = [i.id for i in names if i is not None]
...@@ -1387,8 +1387,8 @@ class GetterSetterHelper: ...@@ -1387,8 +1387,8 @@ class GetterSetterHelper:
""" """
def __init__(self, getter_func, setter_func, *name_lists): def __init__(self, getter_func, setter_func, *name_lists):
name_lists = map(lambda x: [] if x is None else x, name_lists) name_lists = ([] if x is None else x for x in name_lists)
name_sets = map(lambda x: set(x), name_lists) name_sets = (set(x) for x in name_lists)
self._union = list( self._union = list(
functools.reduce(lambda x, y: x | y, name_sets, set()) functools.reduce(lambda x, y: x | y, name_sets, set())
) )
...@@ -1412,7 +1412,7 @@ class GetterSetterHelper: ...@@ -1412,7 +1412,7 @@ class GetterSetterHelper:
), "the name `{}` not in name union set`{}`.".format( ), "the name `{}` not in name union set`{}`.".format(
n, self.name2id.keys() n, self.name2id.keys()
) )
return tuple(map(lambda n: vars[self.name2id[n]], names)) return tuple(vars[self.name2id[n]] for n in names)
def set(self, names, values): def set(self, names, values):
if names is None: if names is None:
...@@ -1429,7 +1429,7 @@ class GetterSetterHelper: ...@@ -1429,7 +1429,7 @@ class GetterSetterHelper:
n, self.name2id.keys() n, self.name2id.keys()
) )
vars = list(vars) vars = list(vars)
indices = list(map(lambda n: self.name2id[n], names)) indices = [self.name2id[n] for n in names]
for i, v in zip(indices, values): for i, v in zip(indices, values):
vars[i] = v vars[i] = v
self.setter(vars) self.setter(vars)
......
...@@ -329,7 +329,7 @@ class While: ...@@ -329,7 +329,7 @@ class While:
if inner_var: if inner_var:
out_vars.append(inner_var) out_vars.append(inner_var)
x_name_list |= set(map(lambda x: x.name, out_vars)) x_name_list |= {x.name for x in out_vars}
# NOTE(dev): cond_var has been contained in Input('Condition'), so # NOTE(dev): cond_var has been contained in Input('Condition'), so
# we remove it from Input('X') # we remove it from Input('X')
x_name_list -= {self.cond_var.name} x_name_list -= {self.cond_var.name}
......
...@@ -75,12 +75,11 @@ def _remove_unused_var_nodes(graph): ...@@ -75,12 +75,11 @@ def _remove_unused_var_nodes(graph):
all_used_vars.add(output_node) all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars} all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = { all_unused_vars = set(
n filter(
for n in filter(
lambda node: node.node not in all_used_vars, graph.all_var_nodes() lambda node: node.node not in all_used_vars, graph.all_var_nodes()
) )
} )
graph.safe_remove_nodes(all_unused_vars) graph.safe_remove_nodes(all_unused_vars)
return graph return graph
......
...@@ -532,13 +532,12 @@ class Quant2Int8MkldnnPass: ...@@ -532,13 +532,12 @@ class Quant2Int8MkldnnPass:
all_used_vars.add(output_node) all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars} all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = { all_unused_vars = set(
n filter(
for n in filter(
lambda node: node.node not in all_used_vars, lambda node: node.node not in all_used_vars,
graph.all_var_nodes(), graph.all_var_nodes(),
) )
} )
graph.safe_remove_nodes(all_unused_vars) graph.safe_remove_nodes(all_unused_vars)
return graph return graph
......
...@@ -280,11 +280,10 @@ class QuantInt8MkldnnPass: ...@@ -280,11 +280,10 @@ class QuantInt8MkldnnPass:
all_used_vars.add(output_node) all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars} all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = { all_unused_vars = set(
n filter(
for n in filter(
lambda node: node.node not in all_used_vars, lambda node: node.node not in all_used_vars,
graph.all_var_nodes(), graph.all_var_nodes(),
) )
} )
graph.safe_remove_nodes(all_unused_vars) graph.safe_remove_nodes(all_unused_vars)
...@@ -1407,13 +1407,12 @@ class QuantizationFreezePass: ...@@ -1407,13 +1407,12 @@ class QuantizationFreezePass:
all_used_vars.add(output_node) all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars} all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = { all_unused_vars = set(
n filter(
for n in filter(
lambda node: node.node not in all_used_vars, lambda node: node.node not in all_used_vars,
graph.all_var_nodes(), graph.all_var_nodes(),
) )
} )
graph.safe_remove_nodes(all_unused_vars) graph.safe_remove_nodes(all_unused_vars)
def _original_var_name(self, var_name): def _original_var_name(self, var_name):
...@@ -1524,13 +1523,12 @@ class ConvertToInt8Pass: ...@@ -1524,13 +1523,12 @@ class ConvertToInt8Pass:
all_used_vars.add(output_node) all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars} all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = { all_unused_vars = set(
n filter(
for n in filter(
lambda node: node.node not in all_used_vars, lambda node: node.node not in all_used_vars,
graph.all_var_nodes(), graph.all_var_nodes(),
) )
} )
graph.safe_remove_nodes(all_unused_vars) graph.safe_remove_nodes(all_unused_vars)
...@@ -3224,13 +3222,12 @@ class QuantWeightPass: ...@@ -3224,13 +3222,12 @@ class QuantWeightPass:
all_used_vars.add(output_node) all_used_vars.add(output_node)
all_used_vars = {n.node for n in all_used_vars} all_used_vars = {n.node for n in all_used_vars}
all_unused_vars = { all_unused_vars = set(
n filter(
for n in filter(
lambda node: node.node not in all_used_vars, lambda node: node.node not in all_used_vars,
graph.all_var_nodes(), graph.all_var_nodes(),
) )
} )
graph.safe_remove_nodes(all_unused_vars) graph.safe_remove_nodes(all_unused_vars)
def _load_var(self, name): def _load_var(self, name):
......
...@@ -246,7 +246,7 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): ...@@ -246,7 +246,7 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase):
if iters == skip_batch_num: if iters == skip_batch_num:
total_samples = 0 total_samples = 0
infer_start_time = time.time() infer_start_time = time.time()
images = list(map(lambda x: x[0].reshape(dshape), data)) images = [x[0].reshape(dshape) for x in data]
images = np.array(images).astype('float32') images = np.array(images).astype('float32')
labels = np.array([x[1] for x in data]).astype('int64') labels = np.array([x[1] for x in data]).astype('int64')
......
...@@ -214,7 +214,7 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase): ...@@ -214,7 +214,7 @@ class QuantInt8ImageClassificationComparisonTest(unittest.TestCase):
if iters == skip_batch_num: if iters == skip_batch_num:
total_samples = 0 total_samples = 0
infer_start_time = time.time() infer_start_time = time.time()
images = list(map(lambda x: x[0].reshape(dshape), data)) images = [x[0].reshape(dshape) for x in data]
images = np.array(images).astype('float32') images = np.array(images).astype('float32')
labels = np.array([x[1] for x in data]).astype('int64') labels = np.array([x[1] for x in data]).astype('int64')
......
...@@ -232,7 +232,7 @@ def build_global_view(nop_labels, rhs, n_bcast_dims): ...@@ -232,7 +232,7 @@ def build_global_view(nop_labels, rhs, n_bcast_dims):
g_labels_sum = ''.join(labels) g_labels_sum = ''.join(labels)
g_labels = g_labels_out + g_labels_sum g_labels = g_labels_out + g_labels_sum
g_view = list(map(lambda i: build_view(i, g_labels), nop_labels)) g_view = [build_view(i, g_labels) for i in nop_labels]
g_nout = len(g_labels_out) g_nout = len(g_labels_out)
g_count = count g_count = count
...@@ -741,7 +741,7 @@ def parse_fake_shape(equation, operands, labels): ...@@ -741,7 +741,7 @@ def parse_fake_shape(equation, operands, labels):
list of shape list of shape
""" """
origin_labels = map(lambda x: x.strip(), equation.split(',')) origin_labels = (x.strip() for x in equation.split(','))
shaped = collections.namedtuple('shaped', ['shape']) shaped = collections.namedtuple('shaped', ['shape'])
def fake_shape(ori_label, label, op): def fake_shape(ori_label, label, op):
...@@ -1047,7 +1047,7 @@ def einsum(equation, *operands): ...@@ -1047,7 +1047,7 @@ def einsum(equation, *operands):
# To handle broadcasting, we should first know how many dimensions are there # To handle broadcasting, we should first know how many dimensions are there
# We need to use that number to generate output labels # We need to use that number to generate output labels
# e.g. 1 for ['ij', 'i.', '.k'] # e.g. 1 for ['ij', 'i.', '.k']
n_bcast_dims = max(map(lambda s: s.count('.'), nop_labels)) n_bcast_dims = max(s.count('.') for s in nop_labels)
# Build the data structures for planning. It's helpful to think of all the operands # Build the data structures for planning. It's helpful to think of all the operands
# broadcasting together from a global view. In this view, dimensions from multiple # broadcasting together from a global view. In this view, dimensions from multiple
......
...@@ -128,9 +128,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None): ...@@ -128,9 +128,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
op = stack if use_stack else concat op = stack if use_stack else concat
res = op(input, axis=axis) res = op(input, axis=axis)
sizes = paddle.to_tensor( sizes = paddle.to_tensor(np.array([int(x.shape[axis]) for x in input]))
np.array(list(map(lambda x: int(x.shape[axis]), input)))
)
return res, sizes return res, sizes
else: else:
check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor') check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
...@@ -335,7 +333,7 @@ def slice(input, axes, starts, ends): ...@@ -335,7 +333,7 @@ def slice(input, axes, starts, ends):
] ]
elif isinstance(starts, tmp_tensor_type): elif isinstance(starts, tmp_tensor_type):
tensor_t = starts.numpy(False) tensor_t = starts.numpy(False)
starts = [ele for ele in tensor_t] starts = list(tensor_t)
infer_flags = [-1 for i in range(len(axes))] infer_flags = [-1 for i in range(len(axes))]
if isinstance(ends, (list, tuple)): if isinstance(ends, (list, tuple)):
...@@ -345,7 +343,7 @@ def slice(input, axes, starts, ends): ...@@ -345,7 +343,7 @@ def slice(input, axes, starts, ends):
] ]
elif isinstance(ends, tmp_tensor_type): elif isinstance(ends, tmp_tensor_type):
tensor_t = ends.numpy(False) tensor_t = ends.numpy(False)
ends = [ele for ele in tensor_t] ends = list(tensor_t)
infer_flags = [-1 for i in range(len(axes))] infer_flags = [-1 for i in range(len(axes))]
return _C_ops.slice(input, axes, starts, ends, infer_flags, []) return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
...@@ -2048,12 +2046,10 @@ def split(x, num_or_sections, axis=0, name=None): ...@@ -2048,12 +2046,10 @@ def split(x, num_or_sections, axis=0, name=None):
len(num_or_sections) <= input_shape[dim] len(num_or_sections) <= input_shape[dim]
), 'len(num_or_sections) must not be more than input.shape[dim].' ), 'len(num_or_sections) must not be more than input.shape[dim].'
num = len(num_or_sections) num = len(num_or_sections)
attrs['sections'] = list( attrs['sections'] = [
map( -1 if isinstance(ele, Variable) else ele
lambda ele: -1 if isinstance(ele, Variable) else ele, for ele in num_or_sections
num_or_sections, ]
)
)
if paddle.utils._contain_var(num_or_sections): if paddle.utils._contain_var(num_or_sections):
inputs['SectionsTensorList'] = _get_SectionsTensorList( inputs['SectionsTensorList'] = _get_SectionsTensorList(
num_or_sections num_or_sections
......
...@@ -115,8 +115,8 @@ def _to_summary(var): ...@@ -115,8 +115,8 @@ def _to_summary(var):
else: else:
# recursively handle all dimensions # recursively handle all dimensions
if var.shape[0] > 2 * edgeitems: if var.shape[0] > 2 * edgeitems:
begin = [x for x in var[:edgeitems]] begin = list(var[:edgeitems])
end = [x for x in var[(-1 * edgeitems) :]] end = list(var[(-1 * edgeitems) :])
return np.stack([_to_summary(x) for x in (begin + end)]) return np.stack([_to_summary(x) for x in (begin + end)])
else: else:
return np.stack([_to_summary(x) for x in var]) return np.stack([_to_summary(x) for x in var])
......
...@@ -313,8 +313,8 @@ def _recursive_assert_same_structure(nest1, nest2, check_types): ...@@ -313,8 +313,8 @@ def _recursive_assert_same_structure(nest1, nest2, check_types):
keys1, keys2 keys1, keys2
) )
) )
nest1_as_sequence = [n for n in _yield_value(nest1)] nest1_as_sequence = list(_yield_value(nest1))
nest2_as_sequence = [n for n in _yield_value(nest2)] nest2_as_sequence = list(_yield_value(nest2))
for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence): for n1, n2 in zip(nest1_as_sequence, nest2_as_sequence):
_recursive_assert_same_structure(n1, n2, check_types) _recursive_assert_same_structure(n1, n2, check_types)
...@@ -454,12 +454,7 @@ def convert_shape_to_list(shape): ...@@ -454,12 +454,7 @@ def convert_shape_to_list(shape):
Convert shape(list, tuple, variable) to list in imperative mode Convert shape(list, tuple, variable) to list in imperative mode
""" """
if isinstance(shape, (list, tuple)): if isinstance(shape, (list, tuple)):
shape = list( shape = [x.item(0) if isinstance(x, Variable) else x for x in shape]
map(
lambda x: x.item(0) if isinstance(x, Variable) else x,
shape,
)
)
else: else:
shape = shape.astype(int).tolist() shape = shape.astype(int).tolist()
return shape return shape
......
...@@ -636,7 +636,7 @@ class TestHessianBatchFirst(unittest.TestCase): ...@@ -636,7 +636,7 @@ class TestHessianBatchFirst(unittest.TestCase):
np.array(expected), np.array(expected),
(xs_len, xs_len, self.nrow, self.nbatch, self.nrow), (xs_len, xs_len, self.nrow, self.nbatch, self.nrow),
) )
expected = [[n for n in row] for row in expected] expected = [list(row) for row in expected]
expected = utils._np_concat_matrix_sequence(expected) expected = utils._np_concat_matrix_sequence(expected)
self.x.stop_gradient = False self.x.stop_gradient = False
...@@ -662,7 +662,7 @@ class TestHessianBatchFirst(unittest.TestCase): ...@@ -662,7 +662,7 @@ class TestHessianBatchFirst(unittest.TestCase):
np.array(expected), np.array(expected),
(xs_len, xs_len, self.nrow, self.nbatch, self.nrow), (xs_len, xs_len, self.nrow, self.nbatch, self.nrow),
) )
expected = [[n for n in row] for row in expected] expected = [list(row) for row in expected]
expected = utils._np_concat_matrix_sequence(expected) expected = utils._np_concat_matrix_sequence(expected)
expected = utils._np_transpose_matrix_format( expected = utils._np_transpose_matrix_format(
expected, utils.MatrixFormat.NBM, utils.MatrixFormat.BNM expected, utils.MatrixFormat.NBM, utils.MatrixFormat.BNM
......
...@@ -131,7 +131,7 @@ def concat_to_matrix(xs, is_batched=False): ...@@ -131,7 +131,7 @@ def concat_to_matrix(xs, is_batched=False):
"""Concats a tuple of tuple of Jacobian/Hessian matrix into one matrix""" """Concats a tuple of tuple of Jacobian/Hessian matrix into one matrix"""
rows = [] rows = []
for i in range(len(xs)): for i in range(len(xs)):
rows.append(np.concatenate([x for x in xs[i]], -1)) rows.append(np.concatenate(list(xs[i]), -1))
return np.concatenate(rows, 1) if is_batched else np.concatenate(rows, 0) return np.concatenate(rows, 1) if is_batched else np.concatenate(rows, 0)
......
...@@ -45,11 +45,9 @@ if core.is_compiled_with_npu(): ...@@ -45,11 +45,9 @@ if core.is_compiled_with_npu():
# include path # include path
site_packages_path = site.getsitepackages() site_packages_path = site.getsitepackages()
paddle_custom_kernel_include = list( paddle_custom_kernel_include = [
map( os.path.join(path, 'paddle', 'include') for path in site_packages_path
lambda path: os.path.join(path, 'paddle', 'include'), site_packages_path ]
)
)
# include path third_party # include path third_party
compile_third_party_path = os.path.join( compile_third_party_path = os.path.join(
...@@ -61,9 +59,9 @@ paddle_custom_kernel_include += [ ...@@ -61,9 +59,9 @@ paddle_custom_kernel_include += [
] ]
# libs path # libs path
paddle_custom_kernel_library_dir = list( paddle_custom_kernel_library_dir = [
map(lambda path: os.path.join(path, 'paddle', 'fluid'), site_packages_path) os.path.join(path, 'paddle', 'fluid') for path in site_packages_path
) ]
# libs # libs
libs = [':libpaddle.so'] libs = [':libpaddle.so']
......
...@@ -99,9 +99,7 @@ class TestRoIAlign(unittest.TestCase): ...@@ -99,9 +99,7 @@ class TestRoIAlign(unittest.TestCase):
self, self,
): ):
data = ( data = (
np.array([i for i in range(1, 17)]) np.array(list(range(1, 17))).reshape(1, 1, 4, 4).astype(np.float32)
.reshape(1, 1, 4, 4)
.astype(np.float32)
) )
boxes = np.array([[1.0, 1.0, 2.0, 2.0], [1.5, 1.5, 3.0, 3.0]]).astype( boxes = np.array([[1.0, 1.0, 2.0, 2.0], [1.5, 1.5, 3.0, 3.0]]).astype(
np.float32 np.float32
......
...@@ -100,9 +100,7 @@ class TestRoIPool(unittest.TestCase): ...@@ -100,9 +100,7 @@ class TestRoIPool(unittest.TestCase):
self, self,
): ):
data = ( data = (
np.array([i for i in range(1, 17)]) np.array(list(range(1, 17))).reshape(1, 1, 4, 4).astype(np.float32)
.reshape(1, 1, 4, 4)
.astype(np.float32)
) )
boxes = np.array([[1.0, 1.0, 2.0, 2.0], [1.5, 1.5, 3.0, 3.0]]).astype( boxes = np.array([[1.0, 1.0, 2.0, 2.0], [1.5, 1.5, 3.0, 3.0]]).astype(
np.float32 np.float32
......
...@@ -624,7 +624,7 @@ if __name__ == "__main__": ...@@ -624,7 +624,7 @@ if __name__ == "__main__":
os.path.dirname(file) for file in args.files os.path.dirname(file) for file in args.files
] ]
if len(args.dirpaths) >= 1: if len(args.dirpaths) >= 1:
current_work_dirs = current_work_dirs + [d for d in args.dirpaths] current_work_dirs = current_work_dirs + list(args.dirpaths)
cmake_generator = CMakeGenerator(current_work_dirs, args.ignore_cmake_dirs) cmake_generator = CMakeGenerator(current_work_dirs, args.ignore_cmake_dirs)
cmake_generator.prepare_dist_ut_port() cmake_generator.prepare_dist_ut_port()
......
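For reference, a minimal standalone Python sketch of the two rewrite patterns the hunks above apply: identity comprehensions replaced with direct constructor calls, and map(lambda ...) replaced with comprehensions or generator expressions. It assumes nothing beyond the standard library, and every name in it is hypothetical, chosen only for illustration rather than taken from the diff.

# Illustrative sketch only; variable names are hypothetical.

# Identity comprehensions become direct constructor calls.
height = 4
rows_old = [i for i in range(height)]        # before
rows_new = list(range(height))               # after
assert rows_old == rows_new

ids_old = {i for i in range(height)}         # before
ids_new = set(range(height))                 # after
assert ids_old == ids_new

# map(lambda ...) becomes a list comprehension ...
names = ["x", "y"]
upper_old = list(map(lambda s: s.upper(), names))   # before
upper_new = [s.upper() for s in names]               # after
assert upper_old == upper_new

# ... or a generator expression when fed to any()/all(), which avoids
# building an intermediate list and can short-circuit.
ok_old = all(map(lambda v: isinstance(v, int) and v >= 0, rows_new))  # before
ok_new = all(isinstance(v, int) and v >= 0 for v in rows_new)         # after
assert ok_old == ok_new

In each pair the behavior is unchanged; the rewritten forms are simply more direct, and the generator variants skip materializing an intermediate list.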