Unverified · Commit cf391b81 authored by PuQing, committed by GitHub

[CodeStyle][C408][C409][C410] Fix unnecessary <dict/list/tuple> call and unnecessary <list/tuple> passed to <list/tuple>() (#51928)

* autofix

* add select config

* autofix C410

* add C410 select
Parent 0480ff5d
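The hunks below are mechanical autofixes. For reference, a minimal before/after sketch of the patterns the three flake8-comprehensions rules flag; the variable names here are illustrative and not taken from the diff:

```python
# Illustrative sketch only: patterns flagged by C408/C409/C410 and their literal forms.

# C408 -- unnecessary dict/list/tuple call; rewrite as a literal.
opts = dict()               # before
opts = {}                   # after
blob = dict(rois=1)         # before
blob = {'rois': 1}          # after

# C409 -- unnecessary list/tuple passed to tuple().
dims = tuple([1, 2, 3])     # before
dims = (1, 2, 3)            # after

# C410 -- unnecessary list/tuple passed to list().
shape = list([8, 16])       # before
shape = [8, 16]             # after
```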
...@@ -28,7 +28,7 @@ inplace_optional_out_type_map = { ...@@ -28,7 +28,7 @@ inplace_optional_out_type_map = {
class BaseAPI: class BaseAPI:
def __init__(self, api_item_yaml, prims=tuple()): def __init__(self, api_item_yaml, prims=()):
# self.api = api_item_yaml['op'] # self.api = api_item_yaml['op']
self.api = api_item_yaml['name'] self.api = api_item_yaml['name']
...@@ -248,7 +248,7 @@ class BaseAPI: ...@@ -248,7 +248,7 @@ class BaseAPI:
class EagerPrimAPI(BaseAPI): class EagerPrimAPI(BaseAPI):
def __init__(self, api_item_yaml, prims=tuple()): def __init__(self, api_item_yaml, prims=()):
super().__init__(api_item_yaml, prims) super().__init__(api_item_yaml, prims)
def get_api__func_name(self): def get_api__func_name(self):
......
...@@ -271,7 +271,7 @@ static_source_end = """ ...@@ -271,7 +271,7 @@ static_source_end = """
class PrimTensorAPI(BaseAPI): class PrimTensorAPI(BaseAPI):
def __init__(self, api_item_yaml, prims=tuple()): def __init__(self, api_item_yaml, prims=()):
super().__init__(api_item_yaml, prims) super().__init__(api_item_yaml, prims)
def get_api_func_name(self): def get_api_func_name(self):
......
...@@ -468,7 +468,7 @@ operants_manager_source_end = """ ...@@ -468,7 +468,7 @@ operants_manager_source_end = """
class OperantsAPI(ForwardAPI): class OperantsAPI(ForwardAPI):
def __init__(self, api_item_yaml, prims=tuple()): def __init__(self, api_item_yaml, prims=()):
super().__init__(api_item_yaml) super().__init__(api_item_yaml)
self.is_prim_api = False self.is_prim_api = False
if self.get_api_func_name() in prims: if self.get_api_func_name() in prims:
......
...@@ -36,7 +36,10 @@ select = [ ...@@ -36,7 +36,10 @@ select = [
"C400", "C400",
"C401", "C401",
"C402", "C402",
"C408",
"C409",
"C410",
# Pyupgrade # Pyupgrade
"UP001", "UP001",
"UP003", "UP003",
......
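With C408, C409, and C410 added to the `select` list in pyproject.toml above, these rewrites are enforced by the linter going forward. The substitutions are purely syntactic; a quick illustrative check (not part of the commit) that each literal form builds the same object as the call it replaces:

```python
# Sanity check: literal forms are equivalent to the constructor calls they replace.
assert dict() == {} and dict(a=1) == {'a': 1}   # C408
assert tuple([1, 2]) == (1, 2)                  # C409
assert list([1, 2]) == [1, 2]                   # C410
print("all equivalent")
```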
...@@ -19,7 +19,7 @@ from paddle import Tensor ...@@ -19,7 +19,7 @@ from paddle import Tensor
class WindowFunctionRegister: class WindowFunctionRegister:
def __init__(self): def __init__(self):
self._functions_dict = dict() self._functions_dict = {}
def register(self, func=None): def register(self, func=None):
def add_subfunction(func): def add_subfunction(func):
......
...@@ -43,7 +43,7 @@ UNK_IDX = 0 ...@@ -43,7 +43,7 @@ UNK_IDX = 0
def load_label_dict(filename): def load_label_dict(filename):
d = dict() d = {}
tag_dict = set() tag_dict = set()
with open(filename, 'r') as f: with open(filename, 'r') as f:
for i, line in enumerate(f): for i, line in enumerate(f):
...@@ -63,7 +63,7 @@ def load_label_dict(filename): ...@@ -63,7 +63,7 @@ def load_label_dict(filename):
def load_dict(filename): def load_dict(filename):
d = dict() d = {}
with open(filename, 'r') as f: with open(filename, 'r') as f:
for i, line in enumerate(f): for i, line in enumerate(f):
d[line.strip()] = i d[line.strip()] = i
......
...@@ -114,7 +114,7 @@ def __initialize_meta_info__(): ...@@ -114,7 +114,7 @@ def __initialize_meta_info__():
with zipfile.ZipFile(file=fn) as package: with zipfile.ZipFile(file=fn) as package:
for info in package.infolist(): for info in package.infolist():
assert isinstance(info, zipfile.ZipInfo) assert isinstance(info, zipfile.ZipInfo)
MOVIE_INFO = dict() MOVIE_INFO = {}
title_word_set = set() title_word_set = set()
categories_set = set() categories_set = set()
with package.open('ml-1m/movies.dat') as movie_file: with package.open('ml-1m/movies.dat') as movie_file:
...@@ -132,17 +132,17 @@ def __initialize_meta_info__(): ...@@ -132,17 +132,17 @@ def __initialize_meta_info__():
title_word_set.add(w.lower()) title_word_set.add(w.lower())
global MOVIE_TITLE_DICT global MOVIE_TITLE_DICT
MOVIE_TITLE_DICT = dict() MOVIE_TITLE_DICT = {}
for i, w in enumerate(title_word_set): for i, w in enumerate(title_word_set):
MOVIE_TITLE_DICT[w] = i MOVIE_TITLE_DICT[w] = i
global CATEGORIES_DICT global CATEGORIES_DICT
CATEGORIES_DICT = dict() CATEGORIES_DICT = {}
for i, c in enumerate(categories_set): for i, c in enumerate(categories_set):
CATEGORIES_DICT[c] = i CATEGORIES_DICT[c] = i
global USER_INFO global USER_INFO
USER_INFO = dict() USER_INFO = {}
with package.open('ml-1m/users.dat') as user_file: with package.open('ml-1m/users.dat') as user_file:
for line in user_file: for line in user_file:
line = line.decode(encoding='latin') line = line.decode(encoding='latin')
......
...@@ -48,7 +48,7 @@ UNK_IDX = 2 ...@@ -48,7 +48,7 @@ UNK_IDX = 2
def __read_to_dict(tar_file, dict_size): def __read_to_dict(tar_file, dict_size):
def __to_dict(fd, size): def __to_dict(fd, size):
out_dict = dict() out_dict = {}
for line_count, line in enumerate(fd): for line_count, line in enumerate(fd):
if line_count < size: if line_count < size:
out_dict[line.strip().decode()] = line_count out_dict[line.strip().decode()] = line_count
......
...@@ -276,7 +276,7 @@ class AutoAlignTool: ...@@ -276,7 +276,7 @@ class AutoAlignTool:
save_dir, "dist_attr_rank{}.pkl".format(dist.get_rank()) save_dir, "dist_attr_rank{}.pkl".format(dist.get_rank())
) )
if vars is not None: if vars is not None:
vars_dict = dict() vars_dict = {}
assert len(fetch_list) == len(vars) assert len(fetch_list) == len(vars)
for i in range(len(fetch_list)): for i in range(len(fetch_list)):
if vars[i] is None: if vars[i] is None:
...@@ -331,7 +331,7 @@ class AutoAlignTool: ...@@ -331,7 +331,7 @@ class AutoAlignTool:
with (open(filepath, "rb")) as f: with (open(filepath, "rb")) as f:
dist_attr_list.append(pickle.load(f)) dist_attr_list.append(pickle.load(f))
dist_attr_map = dict() dist_attr_map = {}
for dist_attrs in dist_attr_list: for dist_attrs in dist_attr_list:
for dist_attr_name in dist_attrs.keys(): for dist_attr_name in dist_attrs.keys():
if dist_attr_name not in dist_attr_map: if dist_attr_name not in dist_attr_map:
...@@ -352,9 +352,9 @@ class AutoAlignTool: ...@@ -352,9 +352,9 @@ class AutoAlignTool:
if src_attr_map is None or len(src_attr_map) == 0: if src_attr_map is None or len(src_attr_map) == 0:
return vars_list[0] return vars_list[0]
dst_strategys = dict() dst_strategys = {}
src_strategys = dict() src_strategys = {}
tensors_dict = dict() tensors_dict = {}
convert_tensor_dict = None convert_tensor_dict = None
for var_name in src_attr_map.keys(): for var_name in src_attr_map.keys():
...@@ -451,7 +451,7 @@ class AutoAlignTool: ...@@ -451,7 +451,7 @@ class AutoAlignTool:
def diff_informations_from_dirs(right_dirs, wrong_dirs): def diff_informations_from_dirs(right_dirs, wrong_dirs):
right_vars_list = [] right_vars_list = []
right_program_list = [] right_program_list = []
right_dist_attr_map = dict() right_dist_attr_map = {}
for right_dir in right_dirs: for right_dir in right_dirs:
( (
tmp_vars_list, tmp_vars_list,
...@@ -473,7 +473,7 @@ class AutoAlignTool: ...@@ -473,7 +473,7 @@ class AutoAlignTool:
wrong_vars_list = [] wrong_vars_list = []
wrong_program_list = [] wrong_program_list = []
wrong_dist_attr_map = dict() wrong_dist_attr_map = {}
for wrong_dir in wrong_dirs: for wrong_dir in wrong_dirs:
( (
tmp_vars_list, tmp_vars_list,
......
...@@ -550,7 +550,7 @@ class Completer: ...@@ -550,7 +550,7 @@ class Completer:
def _find_nodes_related_to_cond(source_node): def _find_nodes_related_to_cond(source_node):
related_nodes = [] related_nodes = []
visited = set() visited = set()
frontier = list() frontier = []
frontier.append(source_node) frontier.append(source_node)
# BFS # BFS
while len(frontier) != 0: while len(frontier) != 0:
......
...@@ -421,7 +421,7 @@ class _CommunicateGroup: ...@@ -421,7 +421,7 @@ class _CommunicateGroup:
def __init__(self): def __init__(self):
global _HYBRID_PARALLEL_GROUP global _HYBRID_PARALLEL_GROUP
_HYBRID_PARALLEL_GROUP = self _HYBRID_PARALLEL_GROUP = self
self.groups = dict() self.groups = {}
def set_comm_group( def set_comm_group(
self, group_name, group_rank, group_size, ring_id, group_ranks self, group_name, group_rank, group_size, ring_id, group_ranks
......
...@@ -530,7 +530,7 @@ class ElasticManager: ...@@ -530,7 +530,7 @@ class ElasticManager:
# 10.10.10.0 is removed # 10.10.10.0 is removed
# the new trainers is:10.10.10.3,10.10.10.1,10.10.10.2 # the new trainers is:10.10.10.3,10.10.10.1,10.10.10.2
# In this case, the rank of 10.10.10.1 and 10.10.10.2 remains unchanged, while the rank of 10.10.10.3 is set to rank0 # In this case, the rank of 10.10.10.1 and 10.10.10.2 remains unchanged, while the rank of 10.10.10.3 is set to rank0
endpoints_dict = dict() endpoints_dict = {}
unsorted_endpoints = [] unsorted_endpoints = []
for id, host_port in enumerate(self.hosts): for id, host_port in enumerate(self.hosts):
idx = host_endpoints.index(host_port) idx = host_endpoints.index(host_port)
......
...@@ -1298,7 +1298,7 @@ class Fleet: ...@@ -1298,7 +1298,7 @@ class Fleet:
self.origin_main_program = loss.block.program self.origin_main_program = loss.block.program
# add distributed attr # add distributed attr
if not hasattr(self.origin_main_program, "distributed_info_"): if not hasattr(self.origin_main_program, "distributed_info_"):
self.origin_main_program.distributed_info_ = dict() self.origin_main_program.distributed_info_ = {}
self.origin_main_program.distributed_info_[ self.origin_main_program.distributed_info_[
"dp_degree" "dp_degree"
] = self._user_defined_strategy.sharding_configs["dp_degree"] ] = self._user_defined_strategy.sharding_configs["dp_degree"]
......
...@@ -318,7 +318,7 @@ class ParameterServerOptimizer(MetaOptimizerBase): ...@@ -318,7 +318,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
upper_mem_use = param_memory_size * 5.0 upper_mem_use = param_memory_size * 5.0
program_tmp_vars = dict() program_tmp_vars = {}
eval_batch_size = 1024 eval_batch_size = 1024
for op in program.global_block().ops: for op in program.global_block().ops:
for var_name in op.output_arg_names: for var_name in op.output_arg_names:
......
...@@ -214,7 +214,7 @@ class PipelineOptimizer(MetaOptimizerBase): ...@@ -214,7 +214,7 @@ class PipelineOptimizer(MetaOptimizerBase):
block = loss.block block = loss.block
program = block.program program = block.program
program._pipeline_opt = dict() program._pipeline_opt = {}
program._pipeline_opt['local_rank'] = self.rank program._pipeline_opt['local_rank'] = self.rank
program._pipeline_opt['global_ring_id'] = self.global_ring_id program._pipeline_opt['global_ring_id'] = self.global_ring_id
program._pipeline_opt['ring_id'] = self.start_pipeline_ring_id program._pipeline_opt['ring_id'] = self.start_pipeline_ring_id
......
...@@ -222,7 +222,7 @@ class ParameterServerOptimizer(MetaOptimizerBase): ...@@ -222,7 +222,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
upper_mem_use = param_memory_size * 5.0 upper_mem_use = param_memory_size * 5.0
program_tmp_vars = dict() program_tmp_vars = {}
eval_batch_size = 1024 eval_batch_size = 1024
for op in program.global_block().ops: for op in program.global_block().ops:
for var_name in op.output_arg_names: for var_name in op.output_arg_names:
......
...@@ -156,11 +156,11 @@ class OffloadHelper: ...@@ -156,11 +156,11 @@ class OffloadHelper:
(p_fp16) = cast(p) (p_fp16) = cast(p)
(p@offload) = memcpy(p) (p@offload) = memcpy(p)
""" """
param_to_idx = dict() param_to_idx = {}
param_to_fp16 = dict() param_to_fp16 = {}
# recompute_var which need rename to fp16_param # recompute_var which need rename to fp16_param
fp16_param_to_recompute = dict() fp16_param_to_recompute = {}
recompute_to_fp16 = dict() recompute_to_fp16 = {}
def remove_param(input_name): def remove_param(input_name):
param_to_idx.pop(input_name) param_to_idx.pop(input_name)
...@@ -215,7 +215,7 @@ class OffloadHelper: ...@@ -215,7 +215,7 @@ class OffloadHelper:
fp16_param_to_recompute[fp16_param] = output_name fp16_param_to_recompute[fp16_param] = output_name
recompute_to_fp16[output_name] = fp16_param recompute_to_fp16[output_name] = fp16_param
param_name_to_offload_name = dict() param_name_to_offload_name = {}
# step3: main_block add offload, cast op # step3: main_block add offload, cast op
# change recompute to fp16, remove cast(param) to fp16 # change recompute to fp16, remove cast(param) to fp16
for idx, op in reversed(list(enumerate(block.ops))): for idx, op in reversed(list(enumerate(block.ops))):
...@@ -325,7 +325,7 @@ class OffloadHelper: ...@@ -325,7 +325,7 @@ class OffloadHelper:
(m1out, m2out, pout) = adam(m1, m2, p) (m1out, m2out, pout) = adam(m1, m2, p)
(m1@offload, m2@offload) = memcpy(m1, m2) (m1@offload, m2@offload) = memcpy(m1, m2)
""" """
vars_name_to_offload_name = dict() vars_name_to_offload_name = {}
# main_block add offload # main_block add offload
for idx, op in reversed(list(enumerate(block.ops))): for idx, op in reversed(list(enumerate(block.ops))):
...@@ -403,10 +403,10 @@ class OffloadHelper: ...@@ -403,10 +403,10 @@ class OffloadHelper:
""" """
global_params = set() global_params = set()
local_params = set() local_params = set()
param_to_fp16 = dict() param_to_fp16 = {}
# recompute_var which need rename to fp16_param # recompute_var which need rename to fp16_param
fp16_param_to_recompute = dict() fp16_param_to_recompute = {}
recompute_to_fp16 = dict() recompute_to_fp16 = {}
def remove_param(input_name): def remove_param(input_name):
global_params.remove(input_name) global_params.remove(input_name)
...@@ -464,7 +464,7 @@ class OffloadHelper: ...@@ -464,7 +464,7 @@ class OffloadHelper:
fp16_param_to_recompute[fp16_param] = output_name fp16_param_to_recompute[fp16_param] = output_name
recompute_to_fp16[output_name] = fp16_param recompute_to_fp16[output_name] = fp16_param
param_name_to_offload_name = dict() param_name_to_offload_name = {}
# step3: main_block add offload, cast op # step3: main_block add offload, cast op
# change recompute to fp16, remove cast(param) to fp16 # change recompute to fp16, remove cast(param) to fp16
for idx, op in reversed(list(enumerate(block.ops))): for idx, op in reversed(list(enumerate(block.ops))):
......
...@@ -30,8 +30,8 @@ class Shard: ...@@ -30,8 +30,8 @@ class Shard:
self.global_params = set([]) self.global_params = set([])
self.worker_idx = -1 self.worker_idx = -1
self.worker_num = -1 self.worker_num = -1
self.global_param2device = dict() self.global_param2device = {}
self.device2global_params = dict() self.device2global_params = {}
def setup(self, params_grads, worker_idx, worker_num): def setup(self, params_grads, worker_idx, worker_num):
# param names of all devices # param names of all devices
......
...@@ -230,7 +230,7 @@ class ShardingOptimizer(MetaOptimizerBase): ...@@ -230,7 +230,7 @@ class ShardingOptimizer(MetaOptimizerBase):
gm_acc_step = int(sharding_configs["gradient_merge_acc_step"]) gm_acc_step = int(sharding_configs["gradient_merge_acc_step"])
if self.pp_degree <= 1: if self.pp_degree <= 1:
gm_mode = "sharding_gm" gm_mode = "sharding_gm"
self._grad2merged_grad = dict() self._grad2merged_grad = {}
else: else:
gm_mode = "pp_gm" gm_mode = "pp_gm"
gm_acc_step = strategy.pipeline_configs['accumulate_steps'] gm_acc_step = strategy.pipeline_configs['accumulate_steps']
...@@ -937,7 +937,7 @@ class ShardingOptimizer(MetaOptimizerBase): ...@@ -937,7 +937,7 @@ class ShardingOptimizer(MetaOptimizerBase):
last_backward_op_idx = op_idx + 1 last_backward_op_idx = op_idx + 1
break break
var2broadcast_time = dict() var2broadcast_time = {}
segment = ProgramSegment(block) segment = ProgramSegment(block)
segment._end_idx = last_backward_op_idx segment._end_idx = last_backward_op_idx
for op_idx in reversed(range(last_backward_op_idx)): for op_idx in reversed(range(last_backward_op_idx)):
......
...@@ -53,7 +53,7 @@ align = { ...@@ -53,7 +53,7 @@ align = {
} }
global CHECK_LAYER global CHECK_LAYER
CHECK_LAYER = dict() # Help to check layer's id -> layer's name CHECK_LAYER = {} # Help to check layer's id -> layer's name
class GroupShardedStage3(nn.Layer): class GroupShardedStage3(nn.Layer):
...@@ -120,7 +120,7 @@ class GroupShardedStage3(nn.Layer): ...@@ -120,7 +120,7 @@ class GroupShardedStage3(nn.Layer):
else int(paddle.get_device().split(":")[1]) else int(paddle.get_device().split(":")[1])
) )
global param2dtype global param2dtype
param2dtype = dict() param2dtype = {}
# Communication group establishment # Communication group establishment
self._group = ( self._group = (
...@@ -140,14 +140,14 @@ class GroupShardedStage3(nn.Layer): ...@@ -140,14 +140,14 @@ class GroupShardedStage3(nn.Layer):
# Parameter segmentation for global ranks # Parameter segmentation for global ranks
# After flatten -> self._param2buffer_size, self._param2buffer, self._trainable_params # After flatten -> self._param2buffer_size, self._param2buffer, self._trainable_params
self._param2buffer_size = dict() # {param.name: size} self._param2buffer_size = {} # {param.name: size}
self._param2buffer = ( self._param2buffer = (
dict() {}
) # {param.name: [(start0, end0),(start1, end1), ...]} ) # {param.name: [(start0, end0),(start1, end1), ...]}
self._trainable_params = dict() # {id(layer): [trainable_params]} self._trainable_params = {} # {id(layer): [trainable_params]}
self._unslice_params = set() # param's numel <= segment_size self._unslice_params = set() # param's numel <= segment_size
self._unslice_params2align = dict() # {param.name: param's align} self._unslice_params2align = {} # {param.name: param's align}
self._grad_storages = dict() # {param.dtype: GradStorage} self._grad_storages = {} # {param.dtype: GradStorage}
assert not isinstance( assert not isinstance(
optimizer, list optimizer, list
...@@ -185,7 +185,7 @@ class GroupShardedStage3(nn.Layer): ...@@ -185,7 +185,7 @@ class GroupShardedStage3(nn.Layer):
# In the first step, record the execution order of the layer # In the first step, record the execution order of the layer
self._order_tracer = OrderedDict() self._order_tracer = OrderedDict()
self._order_tracer["order"] = 0 self._order_tracer["order"] = 0
self._order_tracer["layer"] = list() self._order_tracer["layer"] = []
# Register task flow # Register task flow
self._task_flow = TaskFlow() self._task_flow = TaskFlow()
...@@ -315,7 +315,7 @@ class GroupShardedStage3(nn.Layer): ...@@ -315,7 +315,7 @@ class GroupShardedStage3(nn.Layer):
) )
def _handle_unslice_params(self): def _handle_unslice_params(self):
buffer_size = dict() buffer_size = {}
buffer_size[Type.bf16.value] = 0 buffer_size[Type.bf16.value] = 0
buffer_size[Type.fp32.value] = 0 buffer_size[Type.fp32.value] = 0
buffer_size[Type.fp16.value] = 0 buffer_size[Type.fp16.value] = 0
...@@ -381,7 +381,7 @@ class GroupShardedStage3(nn.Layer): ...@@ -381,7 +381,7 @@ class GroupShardedStage3(nn.Layer):
def _add_manage_info(trainable_param): def _add_manage_info(trainable_param):
return _PartitionParam(trainable_param) return _PartitionParam(trainable_param)
current_params = list() current_params = []
for p in current_layer_params: for p in current_layer_params:
if p._numel() > self._segment_size: if p._numel() > self._segment_size:
current_params.append(_add_manage_info(p)) current_params.append(_add_manage_info(p))
...@@ -911,9 +911,9 @@ class TaskFlow: ...@@ -911,9 +911,9 @@ class TaskFlow:
def __init__( def __init__(
self, self,
full_param=dict(), full_param={},
full_grad=dict(), full_grad={},
use_calc=dict(), use_calc={},
callback=None, callback=None,
): ):
self.full_param = full_param self.full_param = full_param
......
...@@ -125,7 +125,7 @@ class ParamStorage(InternalStorage): ...@@ -125,7 +125,7 @@ class ParamStorage(InternalStorage):
self.param2align = param2align self.param2align = param2align
cpu_param_shape = list() cpu_param_shape = []
for param in trainable_params: for param in trainable_params:
p_shape = self._add_param_as_view( p_shape = self._add_param_as_view(
param, param2align[param.name], convert_gpu param, param2align[param.name], convert_gpu
......
...@@ -214,11 +214,11 @@ class HybridParallelInferenceHelper: ...@@ -214,11 +214,11 @@ class HybridParallelInferenceHelper:
self._op_role_key = op_maker.kOpRoleAttrName() self._op_role_key = op_maker.kOpRoleAttrName()
self._op_device_key = op_maker.kOpDeviceAttrName() self._op_device_key = op_maker.kOpDeviceAttrName()
self._param_device_map = dict() self._param_device_map = {}
self._pipeline_pair = [] self._pipeline_pair = []
self._pipeline_pair_in_while = [] self._pipeline_pair_in_while = []
self._pp_ring_map = dict() self._pp_ring_map = {}
self.ring_id = 20 # Just a magic number self.ring_id = 20 # Just a magic number
self.micro_batch_size = micro_batch_size self.micro_batch_size = micro_batch_size
...@@ -556,7 +556,7 @@ class HybridParallelInferenceHelper: ...@@ -556,7 +556,7 @@ class HybridParallelInferenceHelper:
""" """
# A map from var to device where op takes it as input, # A map from var to device where op takes it as input,
# avoiding multiple send and recv ops. # avoiding multiple send and recv ops.
input_var_to_device = dict() input_var_to_device = {}
extra_index_info = { extra_index_info = {
'index': 0, 'index': 0,
......
...@@ -180,7 +180,7 @@ class DistributedInfer: ...@@ -180,7 +180,7 @@ class DistributedInfer:
if input_indexes[i] == 1: if input_indexes[i] == 1:
move_ops.append((global_block.ops[i], i)) move_ops.append((global_block.ops[i], i))
for i, op in enumerate(move_ops): for i, op in enumerate(move_ops):
queue = list() queue = []
visited = set() visited = set()
queue.append(op[1]) queue.append(op[1])
visited.add(op[0]) visited.add(op[0])
......
...@@ -76,7 +76,7 @@ class QuantizationPass(PassBase): ...@@ -76,7 +76,7 @@ class QuantizationPass(PassBase):
) )
# 0. record the relation among blocks # 0. record the relation among blocks
parent_idx_dict = dict() parent_idx_dict = {}
for block in main_program.blocks: for block in main_program.blocks:
parent_idx_dict[block.idx] = block.parent_idx parent_idx_dict[block.idx] = block.parent_idx
......
...@@ -1806,7 +1806,7 @@ class ShardingInfo: ...@@ -1806,7 +1806,7 @@ class ShardingInfo:
self.params, self.group_size, self.partition_algor self.params, self.group_size, self.partition_algor
) )
# include fp32 and fp16 param # include fp32 and fp16 param
self.param_to_rank = dict() self.param_to_rank = {}
self._map_param_to_rank() self._map_param_to_rank()
def _map_param_to_rank(self): def _map_param_to_rank(self):
......
...@@ -277,7 +277,7 @@ class DistributedOpsPass(PassBase): ...@@ -277,7 +277,7 @@ class DistributedOpsPass(PassBase):
if input_indexes[i] == 1: if input_indexes[i] == 1:
move_ops.append((global_block.ops[i], i)) move_ops.append((global_block.ops[i], i))
for i, op in enumerate(move_ops): for i, op in enumerate(move_ops):
queue = list() queue = []
visited = set() visited = set()
queue.append(op[1]) queue.append(op[1])
visited.add(op[0]) visited.add(op[0])
...@@ -334,11 +334,11 @@ class DistributedOpsPass(PassBase): ...@@ -334,11 +334,11 @@ class DistributedOpsPass(PassBase):
assert global_block.desc.op(i) == global_block.ops[i].desc assert global_block.desc.op(i) == global_block.ops[i].desc
if attrs['use_ps_gpu']: if attrs['use_ps_gpu']:
gpups_inputs_idxs = list() gpups_inputs_idxs = []
gpups_outputs_idxs = list() gpups_outputs_idxs = []
gpups_inputs = list() gpups_inputs = []
gpups_outputs = list() gpups_outputs = []
gpups_w_size = list() gpups_w_size = []
gpups_min_distributed_idx = len(_program.global_block().ops) + 1 gpups_min_distributed_idx = len(_program.global_block().ops) + 1
for param, ops in pull_sparse_ops.items(): for param, ops in pull_sparse_ops.items():
......
...@@ -110,7 +110,7 @@ class FLClientBase(abc.ABC): ...@@ -110,7 +110,7 @@ class FLClientBase(abc.ABC):
self.role_maker = role_maker self.role_maker = role_maker
self.config = config self.config = config
self.total_train_epoch = int(self.config.get("runner.epochs")) self.total_train_epoch = int(self.config.get("runner.epochs"))
self.train_statical_info = dict() self.train_statical_info = {}
self.train_statical_info['speed'] = [] self.train_statical_info['speed'] = []
self.epoch_idx = 0 self.epoch_idx = 0
self.worker_index = fleet.worker_index() self.worker_index = fleet.worker_index()
...@@ -121,7 +121,7 @@ class FLClientBase(abc.ABC): ...@@ -121,7 +121,7 @@ class FLClientBase(abc.ABC):
logger.info( logger.info(
"fl-ps > coordinator enpoints: {}".format(self._coordinators) "fl-ps > coordinator enpoints: {}".format(self._coordinators)
) )
self.strategy_handlers = dict() self.strategy_handlers = {}
self.exe = None self.exe = None
self.use_cuda = int(self.config.get("runner.use_gpu")) self.use_cuda = int(self.config.get("runner.use_gpu"))
self.place = paddle.CUDAPlace(0) if self.use_cuda else paddle.CPUPlace() self.place = paddle.CUDAPlace(0) if self.use_cuda else paddle.CPUPlace()
......
...@@ -711,8 +711,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler ...@@ -711,8 +711,8 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
ps_dispatcher = self.config.split_method(self.pserver_endpoints) ps_dispatcher = self.config.split_method(self.pserver_endpoints)
self.table_name = find_distributed_lookup_table(self.origin_program) self.table_name = find_distributed_lookup_table(self.origin_program)
self.has_distributed_lookup_table = self.table_name is not None self.has_distributed_lookup_table = self.table_name is not None
self.param_name_to_grad_name = dict() self.param_name_to_grad_name = {}
self.grad_name_to_param_name = dict() self.grad_name_to_param_name = {}
for param_var, grad_var in self.params_grads: for param_var, grad_var in self.params_grads:
self.param_name_to_grad_name[param_var.name] = grad_var.name self.param_name_to_grad_name[param_var.name] = grad_var.name
self.grad_name_to_param_name[grad_var.name] = param_var.name self.grad_name_to_param_name[grad_var.name] = param_var.name
...@@ -722,7 +722,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler ...@@ -722,7 +722,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
self.origin_program self.origin_program
) )
# use_sparse_update_param_name -> split_height_section # use_sparse_update_param_name -> split_height_section
self.sparse_param_to_height_sections = dict() self.sparse_param_to_height_sections = {}
self.need_delete_optimize_vars = [] self.need_delete_optimize_vars = []
# add distributed attrs to program # add distributed attrs to program
...@@ -753,7 +753,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler ...@@ -753,7 +753,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler
np.random.seed(self.origin_program.random_seed) np.random.seed(self.origin_program.random_seed)
np.random.shuffle(grad_var_mapping_items) np.random.shuffle(grad_var_mapping_items)
self.grad_name_to_send_dummy_out = dict() self.grad_name_to_send_dummy_out = {}
for grad_varname, splited_vars in grad_var_mapping_items: for grad_varname, splited_vars in grad_var_mapping_items:
eplist = ps_dispatcher.dispatch(splited_vars) eplist = ps_dispatcher.dispatch(splited_vars)
......
...@@ -101,8 +101,8 @@ class GeoSgdTranspiler(DistributeTranspiler): ...@@ -101,8 +101,8 @@ class GeoSgdTranspiler(DistributeTranspiler):
self.vars_overview = VarsDistributed() self.vars_overview = VarsDistributed()
self.optimize_ops, self.params_grads = self._get_optimize_pass() self.optimize_ops, self.params_grads = self._get_optimize_pass()
ps_dispatcher = self.config.split_method(self.pserver_endpoints) ps_dispatcher = self.config.split_method(self.pserver_endpoints)
self.param_name_to_grad_name = dict() self.param_name_to_grad_name = {}
self.grad_name_to_param_name = dict() self.grad_name_to_param_name = {}
for param_var, grad_var in self.params_grads: for param_var, grad_var in self.params_grads:
self.param_name_to_grad_name[param_var.name] = grad_var.name self.param_name_to_grad_name[param_var.name] = grad_var.name
self.grad_name_to_param_name[grad_var.name] = param_var.name self.grad_name_to_param_name[grad_var.name] = param_var.name
......
...@@ -99,7 +99,7 @@ class TestPrimDistOp(unittest.TestCase): ...@@ -99,7 +99,7 @@ class TestPrimDistOp(unittest.TestCase):
completer.complete_prim_annotation(self.main_program) completer.complete_prim_annotation(self.main_program)
dist_context.block_state.parse_forward_blocks(self.main_program) dist_context.block_state.parse_forward_blocks(self.main_program)
dist_context.block_state.parse_backward_blocks(self.main_program) dist_context.block_state.parse_backward_blocks(self.main_program)
dist_context.grads_params = dict() dist_context.grads_params = {}
dist_context.grads_params[self.w_grad.name] = self.w.name dist_context.grads_params[self.w_grad.name] = self.w.name
dist_context.synced_gradient = set() dist_context.synced_gradient = set()
dist_context.data_parallel_group = list(range(nranks)) dist_context.data_parallel_group = list(range(nranks))
......
...@@ -457,7 +457,7 @@ class EncoderLayer(Layer): ...@@ -457,7 +457,7 @@ class EncoderLayer(Layer):
super().__init__() super().__init__()
self._preprocess_cmd = preprocess_cmd self._preprocess_cmd = preprocess_cmd
self._encoder_sublayers = list() self._encoder_sublayers = []
self._prepostprocess_dropout = prepostprocess_dropout self._prepostprocess_dropout = prepostprocess_dropout
self._n_layer = n_layer self._n_layer = n_layer
self._preprocess_layer = PrePostProcessLayer( self._preprocess_layer = PrePostProcessLayer(
...@@ -734,7 +734,7 @@ class DecoderLayer(Layer): ...@@ -734,7 +734,7 @@ class DecoderLayer(Layer):
self._pre_process_layer = PrePostProcessLayer( self._pre_process_layer = PrePostProcessLayer(
d_model, preprocess_cmd, 3 d_model, preprocess_cmd, 3
) )
self._decoder_sub_layers = list() self._decoder_sub_layers = []
self._n_layer = n_layer self._n_layer = n_layer
self._preprocess_cmd = preprocess_cmd self._preprocess_cmd = preprocess_cmd
self._prepostprocess_dropout = prepostprocess_dropout self._prepostprocess_dropout = prepostprocess_dropout
......
...@@ -47,7 +47,7 @@ class TestStrategyFactor(unittest.TestCase): ...@@ -47,7 +47,7 @@ class TestStrategyFactor(unittest.TestCase):
self.assertEqual(program_config.min_block_size, 81920) self.assertEqual(program_config.min_block_size, 81920)
# test set_program_config using dict # test set_program_config using dict
program_config_dict = dict() program_config_dict = {}
program_config_dict['min_block_size'] = 8192 program_config_dict['min_block_size'] = 8192
strategy.set_program_config(program_config_dict) strategy.set_program_config(program_config_dict)
program_config = strategy.get_program_config() program_config = strategy.get_program_config()
...@@ -90,7 +90,7 @@ class TestStrategyFactor(unittest.TestCase): ...@@ -90,7 +90,7 @@ class TestStrategyFactor(unittest.TestCase):
self.assertEqual(build_strategy.memory_optimize, False) self.assertEqual(build_strategy.memory_optimize, False)
# test set_build_strategy using dict # test set_build_strategy using dict
build_strategy_dict = dict() build_strategy_dict = {}
build_strategy_dict['memory_optimize'] = True build_strategy_dict['memory_optimize'] = True
strategy.set_build_strategy(build_strategy_dict) strategy.set_build_strategy(build_strategy_dict)
build_strategy = strategy.get_build_strategy() build_strategy = strategy.get_build_strategy()
...@@ -132,7 +132,7 @@ class TestStrategyFactor(unittest.TestCase): ...@@ -132,7 +132,7 @@ class TestStrategyFactor(unittest.TestCase):
) )
# test set_trainer_runtime_config using dict # test set_trainer_runtime_config using dict
trainer_runtime_config_dict = dict() trainer_runtime_config_dict = {}
trainer_runtime_config_dict['communicator_send_queue_size'] = '20' trainer_runtime_config_dict['communicator_send_queue_size'] = '20'
strategy.set_trainer_runtime_config(trainer_runtime_config_dict) strategy.set_trainer_runtime_config(trainer_runtime_config_dict)
trainer_runtime_config = strategy.get_trainer_runtime_config() trainer_runtime_config = strategy.get_trainer_runtime_config()
...@@ -168,7 +168,7 @@ class TestStrategyFactor(unittest.TestCase): ...@@ -168,7 +168,7 @@ class TestStrategyFactor(unittest.TestCase):
self.assertEqual(exec_strategy.num_threads, 4) self.assertEqual(exec_strategy.num_threads, 4)
# test set_execute_strategy using dict # test set_execute_strategy using dict
exec_strategy_dict = dict() exec_strategy_dict = {}
exec_strategy_dict['num_threads'] = 8 exec_strategy_dict['num_threads'] = 8
strategy.set_execute_strategy(exec_strategy_dict) strategy.set_execute_strategy(exec_strategy_dict)
exec_strategy = strategy.get_execute_strategy() exec_strategy = strategy.get_execute_strategy()
...@@ -198,7 +198,7 @@ class TestStrategyFactor(unittest.TestCase): ...@@ -198,7 +198,7 @@ class TestStrategyFactor(unittest.TestCase):
self.assertEqual(server_runtime_config._rpc_send_thread_num, 24) self.assertEqual(server_runtime_config._rpc_send_thread_num, 24)
# test set_server_runtime_config using dict # test set_server_runtime_config using dict
server_runtime_config_dict = dict() server_runtime_config_dict = {}
server_runtime_config_dict['_rpc_send_thread_num'] = 20 server_runtime_config_dict['_rpc_send_thread_num'] = 20
strategy.set_server_runtime_config(server_runtime_config_dict) strategy.set_server_runtime_config(server_runtime_config_dict)
server_runtime_config = strategy.get_server_runtime_config() server_runtime_config = strategy.get_server_runtime_config()
......
...@@ -81,7 +81,7 @@ class TestDistBase(unittest.TestCase): ...@@ -81,7 +81,7 @@ class TestDistBase(unittest.TestCase):
def _run_cluster(self, model_file, envs): def _run_cluster(self, model_file, envs):
run_cluster_process = f"{self._python_interp} -u -m paddle.distributed.launch --log_dir {self.temp_dir.name} {model_file}" run_cluster_process = f"{self._python_interp} -u -m paddle.distributed.launch --log_dir {self.temp_dir.name} {model_file}"
filted_envs = dict() filted_envs = {}
for k in envs.keys(): for k in envs.keys():
if "PADDLE_" == k[:7] and k not in [ if "PADDLE_" == k[:7] and k not in [
"PADDLE_NNODES", "PADDLE_NNODES",
......
...@@ -73,7 +73,7 @@ class CommunicationTestDistBase(unittest.TestCase): ...@@ -73,7 +73,7 @@ class CommunicationTestDistBase(unittest.TestCase):
def gen_product_envs_list(default_envs, changeable_envs): def gen_product_envs_list(default_envs, changeable_envs):
envs_list = list() envs_list = []
for values in itertools.product(*changeable_envs.values()): for values in itertools.product(*changeable_envs.values()):
envs = dict(zip(changeable_envs.keys(), values)) envs = dict(zip(changeable_envs.keys(), values))
envs.update(default_envs) envs.update(default_envs)
......
...@@ -144,7 +144,7 @@ class EncoderLayer(Layer): ...@@ -144,7 +144,7 @@ class EncoderLayer(Layer):
super().__init__() super().__init__()
self._preprocess_cmd = preprocess_cmd self._preprocess_cmd = preprocess_cmd
self._encoder_sublayers = list() self._encoder_sublayers = []
self._prepostprocess_dropout = prepostprocess_dropout self._prepostprocess_dropout = prepostprocess_dropout
self._n_layer = n_layer self._n_layer = n_layer
self._hidden_act = hidden_act self._hidden_act = hidden_act
......
...@@ -331,7 +331,7 @@ class FC(Layer): ...@@ -331,7 +331,7 @@ class FC(Layer):
self._param_attr = param_attr self._param_attr = param_attr
self._bias_attr = bias_attr self._bias_attr = bias_attr
self._act = act self._act = act
self.__w = list() self.__w = []
def _build_once(self, input): def _build_once(self, input):
i = 0 i = 0
...@@ -358,7 +358,7 @@ class FC(Layer): ...@@ -358,7 +358,7 @@ class FC(Layer):
) )
i += 1 i += 1
size = list([self._size]) size = [self._size]
self._b = self.create_parameter( self._b = self.create_parameter(
attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True
) )
...@@ -394,7 +394,7 @@ class FC(Layer): ...@@ -394,7 +394,7 @@ class FC(Layer):
self._b = value self._b = value
def forward(self, input): def forward(self, input):
mul_results = list() mul_results = []
i = 0 i = 0
for inp, param in self._helper.iter_inputs_and_params( for inp, param in self._helper.iter_inputs_and_params(
input, self._param_attr input, self._param_attr
......
...@@ -317,7 +317,7 @@ class FC(paddle.nn.Layer): ...@@ -317,7 +317,7 @@ class FC(paddle.nn.Layer):
self._param_attr = param_attr self._param_attr = param_attr
self._bias_attr = bias_attr self._bias_attr = bias_attr
self._act = act self._act = act
self.__w = list() self.__w = []
def _build_once(self, input): def _build_once(self, input):
i = 0 i = 0
...@@ -344,7 +344,7 @@ class FC(paddle.nn.Layer): ...@@ -344,7 +344,7 @@ class FC(paddle.nn.Layer):
) )
i += 1 i += 1
size = list([self._size]) size = [self._size]
self._b = self.create_parameter( self._b = self.create_parameter(
attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True attr=self._bias_attr, shape=size, dtype=self._dtype, is_bias=True
) )
...@@ -380,7 +380,7 @@ class FC(paddle.nn.Layer): ...@@ -380,7 +380,7 @@ class FC(paddle.nn.Layer):
self._b = value self._b = value
def forward(self, input): def forward(self, input):
mul_results = list() mul_results = []
i = 0 i = 0
for inp, param in self._helper.iter_inputs_and_params( for inp, param in self._helper.iter_inputs_and_params(
input, self._param_attr input, self._param_attr
......
...@@ -43,8 +43,8 @@ class TestGetterSetterHelper(unittest.TestCase): ...@@ -43,8 +43,8 @@ class TestGetterSetterHelper(unittest.TestCase):
assert vars == [1, 1, 3, 10, 12] assert vars == [1, 1, 3, 10, 12]
helper.set(None, None) helper.set(None, None)
assert vars == [1, 1, 3, 10, 12] assert vars == [1, 1, 3, 10, 12]
assert helper.get(None) == tuple() assert helper.get(None) == ()
assert helper.get([]) == tuple() assert helper.get([]) == ()
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -49,7 +49,7 @@ corpus = data_preprocess(corpus) ...@@ -49,7 +49,7 @@ corpus = data_preprocess(corpus)
def build_dict(corpus, min_freq=3): def build_dict(corpus, min_freq=3):
word_freq_dict = dict() word_freq_dict = {}
for line in corpus: for line in corpus:
for word in line: for word in line:
if word not in word_freq_dict: if word not in word_freq_dict:
...@@ -60,9 +60,9 @@ def build_dict(corpus, min_freq=3): ...@@ -60,9 +60,9 @@ def build_dict(corpus, min_freq=3):
word_freq_dict.items(), key=lambda x: x[1], reverse=True word_freq_dict.items(), key=lambda x: x[1], reverse=True
) )
word2id_dict = dict() word2id_dict = {}
word2id_freq = dict() word2id_freq = {}
id2word_dict = dict() id2word_dict = {}
word2id_freq[0] = 1.0 word2id_freq[0] = 1.0
word2id_dict['[oov]'] = 0 word2id_dict['[oov]'] = 0
......
...@@ -255,7 +255,7 @@ class Encoder(Layer): ...@@ -255,7 +255,7 @@ class Encoder(Layer):
super().__init__() super().__init__()
self.encoder_layers = list() self.encoder_layers = []
for i in range(n_layer): for i in range(n_layer):
self.encoder_layers.append( self.encoder_layers.append(
self.add_sublayer( self.add_sublayer(
...@@ -449,7 +449,7 @@ class Decoder(Layer): ...@@ -449,7 +449,7 @@ class Decoder(Layer):
): ):
super().__init__() super().__init__()
self.decoder_layers = list() self.decoder_layers = []
for i in range(n_layer): for i in range(n_layer):
self.decoder_layers.append( self.decoder_layers.append(
self.add_sublayer( self.add_sublayer(
......
...@@ -663,7 +663,7 @@ class OpTest(unittest.TestCase): ...@@ -663,7 +663,7 @@ class OpTest(unittest.TestCase):
type=self.op_type, type=self.op_type,
inputs=inputs, inputs=inputs,
outputs=outputs, outputs=outputs,
attrs=copy(self.attrs) if hasattr(self, "attrs") else dict(), attrs=copy(self.attrs) if hasattr(self, "attrs") else {},
) )
# infer variable type and infer shape in compile-time # infer variable type and infer shape in compile-time
op.desc.infer_var_type(block.desc) op.desc.infer_var_type(block.desc)
...@@ -2259,9 +2259,9 @@ class OpTest(unittest.TestCase): ...@@ -2259,9 +2259,9 @@ class OpTest(unittest.TestCase):
if only_check_prim: if only_check_prim:
return return
self.scope = core.Scope() self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_inputs = self.inputs if hasattr(self, "inputs") else {}
op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else {}
op_attrs = self.attrs if hasattr(self, "attrs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else {}
self._check_grad_helper() self._check_grad_helper()
if self.is_bfloat16_op(): if self.is_bfloat16_op():
......
...@@ -39,7 +39,7 @@ class FeedDataReader: ...@@ -39,7 +39,7 @@ class FeedDataReader:
def _feed_executor(self): def _feed_executor(self):
next_data = next(self._iter) next_data = next(self._iter)
feed_data = dict() feed_data = {}
assert len(self._feed_list) == len(next_data) assert len(self._feed_list) == len(next_data)
for key, value in zip(self._feed_list, next_data): for key, value in zip(self._feed_list, next_data):
feed_data[key] = value feed_data[key] = value
......
...@@ -375,7 +375,7 @@ class PassAutoScanTest(AutoScanTest): ...@@ -375,7 +375,7 @@ class PassAutoScanTest(AutoScanTest):
model_bytes = paddle.static.load_from_file(last_passed_program) model_bytes = paddle.static.load_from_file(last_passed_program)
pg = paddle.static.deserialize_program(model_bytes) pg = paddle.static.deserialize_program(model_bytes)
main_block = pg.desc.block(0) main_block = pg.desc.block(0)
after_op_list = list() after_op_list = []
for i in range(main_block.op_size()): for i in range(main_block.op_size()):
if main_block.op(i).type() in ["feed", "fetch"]: if main_block.op(i).type() in ["feed", "fetch"]:
continue continue
......
...@@ -93,7 +93,7 @@ class OpConfig: ...@@ -93,7 +93,7 @@ class OpConfig:
self.outputs_var_type = outputs_var_type self.outputs_var_type = outputs_var_type
self.attrs = attrs self.attrs = attrs
if self.attrs is None: if self.attrs is None:
self.attrs = dict() self.attrs = {}
self.attrs.update(kwargs) self.attrs.update(kwargs)
def __repr__(self): def __repr__(self):
......
...@@ -121,8 +121,8 @@ class TestConvActOneDNNFusePass(PassAutoScanTest): ...@@ -121,8 +121,8 @@ class TestConvActOneDNNFusePass(PassAutoScanTest):
) )
# 10. Generate legal shape of input:bias of conv2d # 10. Generate legal shape of input:bias of conv2d
inputs = dict() inputs = {}
weights = dict() weights = {}
if draw(st.booleans()): if draw(st.booleans()):
inputs = { inputs = {
'Input': ['input_x'], 'Input': ['input_x'],
......
...@@ -200,7 +200,7 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest): ...@@ -200,7 +200,7 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
) )
# 9. Generate legal elemntwise_add: X of conv2d # 9. Generate legal elemntwise_add: X of conv2d
bias_2_dict = dict() bias_2_dict = {}
bias_2_dict[1] = [ bias_2_dict[1] = [
x_shape[0], x_shape[0],
f_shape[0], f_shape[0],
......
...@@ -121,8 +121,8 @@ class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest): ...@@ -121,8 +121,8 @@ class TestInt8ScaleCalculationMkldnnPass(PassAutoScanTest):
) )
bias_shape = [f_shape[0]] bias_shape = [f_shape[0]]
inputs = dict() inputs = {}
weights = dict() weights = {}
use_mkldnn = True use_mkldnn = True
has_bias = draw(st.booleans()) has_bias = draw(st.booleans())
......
...@@ -129,8 +129,8 @@ class TestConvBiasOneDNNFusePass(PassAutoScanTest): ...@@ -129,8 +129,8 @@ class TestConvBiasOneDNNFusePass(PassAutoScanTest):
# 11. Generate legal shape of input:bias of conv2d # 11. Generate legal shape of input:bias of conv2d
conv_bias_shape = [] conv_bias_shape = []
inputs = dict() inputs = {}
weights = dict() weights = {}
use_mkldnn = None use_mkldnn = None
conv_type = 'conv2d' conv_type = 'conv2d'
if draw(st.booleans()): if draw(st.booleans()):
......
...@@ -205,7 +205,7 @@ def get_multi_pass_desc_from_str(s): ...@@ -205,7 +205,7 @@ def get_multi_pass_desc_from_str(s):
class TestGeneratePass(unittest.TestCase): class TestGeneratePass(unittest.TestCase):
def convert_ops_to_op_dicts(self, ops): def convert_ops_to_op_dicts(self, ops):
op_dicts = dict() op_dicts = {}
for op in ops: for op in ops:
op_list = op_dicts.get(op.type) op_list = op_dicts.get(op.type)
if isinstance(op_list, list): if isinstance(op_list, list):
......
...@@ -22,7 +22,7 @@ def _generate_unique_var_name_sync_with_main_program(prefix): ...@@ -22,7 +22,7 @@ def _generate_unique_var_name_sync_with_main_program(prefix):
def rename_var_with_generator(names_old): def rename_var_with_generator(names_old):
dict_rename_var_old_new = dict() dict_rename_var_old_new = {}
names_old = list(names_old) names_old = list(names_old)
for var_idx, name_old in enumerate(names_old): for var_idx, name_old in enumerate(names_old):
while True: while True:
......
...@@ -108,7 +108,7 @@ class TestReduceSum4DNoReduceSimpleCopyOneDNNOp( ...@@ -108,7 +108,7 @@ class TestReduceSum4DNoReduceSimpleCopyOneDNNOp(
self.op_type = "reduce_sum" self.op_type = "reduce_sum"
self.use_mkldnn = True self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")} self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")}
self.attrs = {'dim': tuple(), 'use_mkldnn': self.use_mkldnn} self.attrs = {'dim': (), 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': np.copy(self.inputs['X'])} self.outputs = {'Out': np.copy(self.inputs['X'])}
......
...@@ -665,7 +665,7 @@ class OpTest(unittest.TestCase): ...@@ -665,7 +665,7 @@ class OpTest(unittest.TestCase):
type=self.op_type, type=self.op_type,
inputs=inputs, inputs=inputs,
outputs=outputs, outputs=outputs,
attrs=copy(self.attrs) if hasattr(self, "attrs") else dict(), attrs=copy(self.attrs) if hasattr(self, "attrs") else {},
) )
# infer variable type and infer shape in compile-time # infer variable type and infer shape in compile-time
op.desc.infer_var_type(block.desc) op.desc.infer_var_type(block.desc)
...@@ -2328,9 +2328,9 @@ class OpTest(unittest.TestCase): ...@@ -2328,9 +2328,9 @@ class OpTest(unittest.TestCase):
check_dygraph = False check_dygraph = False
self.scope = core.Scope() self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_inputs = self.inputs if hasattr(self, "inputs") else {}
op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else {}
op_attrs = self.attrs if hasattr(self, "attrs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else {}
self._check_grad_helper() self._check_grad_helper()
if self.is_bfloat16_op(): if self.is_bfloat16_op():
......
...@@ -240,9 +240,9 @@ class XPUOpTest(OpTest): ...@@ -240,9 +240,9 @@ class XPUOpTest(OpTest):
check_dygraph=True, check_dygraph=True,
): ):
self.scope = core.Scope() self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_inputs = self.inputs if hasattr(self, "inputs") else {}
op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else {}
op_attrs = self.attrs if hasattr(self, "attrs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else {}
self._check_grad_helper() self._check_grad_helper()
if ( if (
......
...@@ -35,8 +35,8 @@ from paddle.fluid import core, framework ...@@ -35,8 +35,8 @@ from paddle.fluid import core, framework
'desired_ops', 'desired_ops',
), ),
( (
('tanh', {'X': ['x']}, {'Out': ['y']}, set(), tuple(), ('tanh_grad',)), ('tanh', {'X': ['x']}, {'Out': ['y']}, set(), (), ('tanh_grad',)),
('empty', {}, {'Out': ['y']}, set(), tuple(), tuple()), ('empty', {}, {'Out': ['y']}, set(), (), ()),
), ),
) )
class TestGetGradOpDescPrimEnabled(unittest.TestCase): class TestGetGradOpDescPrimEnabled(unittest.TestCase):
......
...@@ -40,7 +40,7 @@ from paddle.fluid import core, framework ...@@ -40,7 +40,7 @@ from paddle.fluid import core, framework
{'X': ['x']}, {'X': ['x']},
{'Out': ['y']}, {'Out': ['y']},
set(), set(),
tuple(), (),
( (
'elementwise_mul', 'elementwise_mul',
'fill_constant', 'fill_constant',
...@@ -48,7 +48,7 @@ from paddle.fluid import core, framework ...@@ -48,7 +48,7 @@ from paddle.fluid import core, framework
'elementwise_mul', 'elementwise_mul',
), ),
), ),
('empty', {}, {'Out': ['y']}, set(), tuple(), tuple()), ('empty', {}, {'Out': ['y']}, set(), (), ()),
), ),
) )
class TestGetGradOpDescPrimEnabled(unittest.TestCase): class TestGetGradOpDescPrimEnabled(unittest.TestCase):
......
...@@ -25,7 +25,7 @@ class TestGetGradOpDescPrimEnabled(unittest.TestCase): ...@@ -25,7 +25,7 @@ class TestGetGradOpDescPrimEnabled(unittest.TestCase):
self.inputs = {'X': ['x']} self.inputs = {'X': ['x']}
self.outputs = {'Out': ['y']} self.outputs = {'Out': ['y']}
self.no_grad_var = set() self.no_grad_var = set()
self.grad_sub_block = tuple() self.grad_sub_block = ()
self.desired_ops = 'tanh_grad' self.desired_ops = 'tanh_grad'
self.desired_ops_no_skip = ( self.desired_ops_no_skip = (
'elementwise_mul', 'elementwise_mul',
......
...@@ -50,7 +50,7 @@ class SimpleRNNCell(LayerMixin): ...@@ -50,7 +50,7 @@ class SimpleRNNCell(LayerMixin):
else: else:
self.nonlinearity = lambda x: np.maximum(x, 0.0) self.nonlinearity = lambda x: np.maximum(x, 0.0)
self.parameters = dict() self.parameters = {}
std = 1.0 / math.sqrt(hidden_size) std = 1.0 / math.sqrt(hidden_size)
self.weight_ih = np.random.uniform( self.weight_ih = np.random.uniform(
-std, std, (hidden_size, input_size) -std, std, (hidden_size, input_size)
...@@ -96,7 +96,7 @@ class GRUCell(LayerMixin): ...@@ -96,7 +96,7 @@ class GRUCell(LayerMixin):
self.input_size = input_size self.input_size = input_size
self.hidden_size = hidden_size self.hidden_size = hidden_size
self.bias = bias self.bias = bias
self.parameters = dict() self.parameters = {}
std = 1.0 / math.sqrt(hidden_size) std = 1.0 / math.sqrt(hidden_size)
self.weight_ih = np.random.uniform( self.weight_ih = np.random.uniform(
-std, std, (3 * hidden_size, input_size) -std, std, (3 * hidden_size, input_size)
...@@ -148,7 +148,7 @@ class LSTMCell(LayerMixin): ...@@ -148,7 +148,7 @@ class LSTMCell(LayerMixin):
self.input_size = input_size self.input_size = input_size
self.hidden_size = hidden_size self.hidden_size = hidden_size
self.bias = bias self.bias = bias
self.parameters = dict() self.parameters = {}
std = 1.0 / math.sqrt(hidden_size) std = 1.0 / math.sqrt(hidden_size)
self.weight_ih = np.random.uniform( self.weight_ih = np.random.uniform(
-std, std, (4 * hidden_size, input_size) -std, std, (4 * hidden_size, input_size)
......
...@@ -414,7 +414,7 @@ class TestSparseAdamOp(unittest.TestCase): ...@@ -414,7 +414,7 @@ class TestSparseAdamOp(unittest.TestCase):
scope = core.Scope() scope = core.Scope()
self.setup(scope, place, lazy_mode) self.setup(scope, place, lazy_mode)
op_args = dict() op_args = {}
op_args['lazy_mode'] = lazy_mode op_args['lazy_mode'] = lazy_mode
for key, np_array in self.dense_inputs.items(): for key, np_array in self.dense_inputs.items():
var = scope.var(key).get_tensor() var = scope.var(key).get_tensor()
......
...@@ -174,7 +174,7 @@ class TestPSMinimize(unittest.TestCase): ...@@ -174,7 +174,7 @@ class TestPSMinimize(unittest.TestCase):
""" """
gen sparse config gen sparse config
""" """
sparse_config = dict() sparse_config = {}
# sparse_config['sparse_table_class'] = "DownpourSparseSSDTable" # sparse_config['sparse_table_class'] = "DownpourSparseSSDTable"
sparse_config['sparse_table_class'] = "DownpourSparseTable" sparse_config['sparse_table_class'] = "DownpourSparseTable"
sparse_config['sparse_compress_in_save'] = True sparse_config['sparse_compress_in_save'] = True
...@@ -248,7 +248,7 @@ class TestPSMinimize(unittest.TestCase): ...@@ -248,7 +248,7 @@ class TestPSMinimize(unittest.TestCase):
strategy.a_sync_configs = configs strategy.a_sync_configs = configs
strategy.a_sync = True strategy.a_sync = True
sparse_config = dict() sparse_config = {}
sparse_config['embedding'] = self.gen_sparse_config() sparse_config['embedding'] = self.gen_sparse_config()
strategy.fleet_desc_configs = sparse_config strategy.fleet_desc_configs = sparse_config
......
...@@ -1467,7 +1467,7 @@ class TestRemoteHsigmoid(TestDistLookupTableBase): ...@@ -1467,7 +1467,7 @@ class TestRemoteHsigmoid(TestDistLookupTableBase):
def transpiler_test_impl(self): def transpiler_test_impl(self):
trainer, _ = self.get_trainer() trainer, _ = self.get_trainer()
params_to_check = list() params_to_check = []
for op in trainer.blocks[0].ops: for op in trainer.blocks[0].ops:
if op.type == "hierarchical_sigmoid": if op.type == "hierarchical_sigmoid":
params_to_check = [op.input("W")[0], op.input("Bias")[0]] params_to_check = [op.input("W")[0], op.input("Bias")[0]]
......
...@@ -132,7 +132,7 @@ class TestFlattenOp_5(TestFlattenOp): ...@@ -132,7 +132,7 @@ class TestFlattenOp_5(TestFlattenOp):
class TestFlattenOp_6(TestFlattenOp): class TestFlattenOp_6(TestFlattenOp):
def init_test_case(self): def init_test_case(self):
self.in_shape = tuple() self.in_shape = ()
self.start_axis = 0 self.start_axis = 0
self.stop_axis = -1 self.stop_axis = -1
self.new_shape = (1,) self.new_shape = (1,)
...@@ -317,7 +317,7 @@ class TestDygraphInplaceFlattenPython(unittest.TestCase): ...@@ -317,7 +317,7 @@ class TestDygraphInplaceFlattenPython(unittest.TestCase):
class TestFlatten0DTensorOpError(unittest.TestCase): class TestFlatten0DTensorOpError(unittest.TestCase):
def test_errors(self): def test_errors(self):
image_shape = tuple() image_shape = ()
x = np.random.uniform(-1.0, 1.0, []).astype('float32') x = np.random.uniform(-1.0, 1.0, []).astype('float32')
def test_ValueError1(): def test_ValueError1():
......
...@@ -23,7 +23,7 @@ paddle.enable_static() ...@@ -23,7 +23,7 @@ paddle.enable_static()
class TestFleetExecutor(unittest.TestCase): class TestFleetExecutor(unittest.TestCase):
def run_fleet_executor(self, place, fleet_opt=dict()): def run_fleet_executor(self, place, fleet_opt={}):
exe = paddle.static.Executor(place) exe = paddle.static.Executor(place)
empty_program = paddle.static.Program() empty_program = paddle.static.Program()
with fluid.program_guard(empty_program, empty_program): with fluid.program_guard(empty_program, empty_program):
......
...@@ -70,7 +70,7 @@ class TestLookupTableOpWithPadding(TestFusedEmbeddingSeqPoolOp): ...@@ -70,7 +70,7 @@ class TestLookupTableOpWithPadding(TestFusedEmbeddingSeqPoolOp):
if ver.mkl() == "ON" and 'Linux' in platform.platform(): if ver.mkl() == "ON" and 'Linux' in platform.platform():
ids = np.squeeze(self.ids, axis=2) ids = np.squeeze(self.ids, axis=2)
padding_idx = np.random.choice(ids.flatten(), 1)[0] padding_idx = np.random.choice(ids.flatten(), 1)[0]
output = list() output = []
index = 0 index = 0
for count in self.lod[0]: for count in self.lod[0]:
arr = ids[index : count + index] arr = ids[index : count + index]
......
...@@ -262,7 +262,7 @@ class APITestStaticFusedFFN(unittest.TestCase): ...@@ -262,7 +262,7 @@ class APITestStaticFusedFFN(unittest.TestCase):
dropout2_out = x + F.dropout(x=linear2_out, p=0.0, training=False) dropout2_out = x + F.dropout(x=linear2_out, p=0.0, training=False)
ln_out = F.layer_norm( ln_out = F.layer_norm(
dropout2_out, dropout2_out,
normalized_shape=list([d_model]), normalized_shape=[d_model],
weight=ln2_scale, weight=ln2_scale,
bias=ln2_bias, bias=ln2_bias,
) )
......
...@@ -202,14 +202,14 @@ def _sample_rois( ...@@ -202,14 +202,14 @@ def _sample_rois(
sampled_rois = sampled_boxes * im_scale sampled_rois = sampled_boxes * im_scale
# Faster RCNN blobs # Faster RCNN blobs
frcn_blobs = dict( frcn_blobs = {
rois=sampled_rois, 'rois': sampled_rois,
labels_int32=sampled_labels, 'labels_int32': sampled_labels,
bbox_targets=bbox_targets, 'bbox_targets': bbox_targets,
bbox_inside_weights=bbox_inside_weights, 'bbox_inside_weights': bbox_inside_weights,
bbox_outside_weights=bbox_outside_weights, 'bbox_outside_weights': bbox_outside_weights,
max_overlap=sampled_max_overlap, 'max_overlap': sampled_max_overlap,
) }
return frcn_blobs return frcn_blobs
...@@ -525,7 +525,7 @@ def _generate_groundtruth(images_shape, class_nums, gt_nums): ...@@ -525,7 +525,7 @@ def _generate_groundtruth(images_shape, class_nums, gt_nums):
is_crowd = np.zeros((gt_nums), dtype=np.int32) is_crowd = np.zeros((gt_nums), dtype=np.int32)
is_crowd[0] = 1 is_crowd[0] = 1
ground_truth.append( ground_truth.append(
dict(gt_classes=gt_classes, boxes=gt_boxes, is_crowd=is_crowd) {'gt_classes': gt_classes, 'boxes': gt_boxes, 'is_crowd': is_crowd}
) )
num_gts += len(gt_classes) num_gts += len(gt_classes)
gts_lod.append(num_gts) gts_lod.append(num_gts)
......
...@@ -120,9 +120,9 @@ class TestGroupNormOp(OpTest): ...@@ -120,9 +120,9 @@ class TestGroupNormOp(OpTest):
place = core.CPUPlace() place = core.CPUPlace()
place2 = core.CUDAPlace(0) place2 = core.CUDAPlace(0)
self.scope = core.Scope() self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_inputs = self.inputs if hasattr(self, "inputs") else {}
op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else {}
op_attrs = self.attrs if hasattr(self, "attrs") else dict() op_attrs = self.attrs if hasattr(self, "attrs") else {}
self.op = create_op( self.op = create_op(
self.scope, self.op_type, op_inputs, op_outputs, op_attrs self.scope, self.op_type, op_inputs, op_outputs, op_attrs
) )
......
...@@ -343,7 +343,7 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase): ...@@ -343,7 +343,7 @@ class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
exe = fluid.Executor(place) exe = fluid.Executor(place)
exe.run(start_up) exe.run(start_up)
result = list() result = []
for i in range(10): for i in range(10):
data = [ data = [
( (
......
...@@ -190,7 +190,7 @@ class TestDygraphDeepCF(unittest.TestCase): ...@@ -190,7 +190,7 @@ class TestDygraphDeepCF(unittest.TestCase):
def load_data(self): def load_data(self):
sys.stderr.write('loading from %s\n' % self.data_path) sys.stderr.write('loading from %s\n' % self.data_path)
likes = dict() likes = {}
num_users = -1 num_users = -1
num_items = -1 num_items = -1
with open(self.data_path, 'r') as f: with open(self.data_path, 'r') as f:
......
...@@ -122,7 +122,7 @@ class TestDygraphGAN(unittest.TestCase): ...@@ -122,7 +122,7 @@ class TestDygraphGAN(unittest.TestCase):
if not core.is_compiled_with_cuda() if not core.is_compiled_with_cuda()
else fluid.CUDAPlace(0) else fluid.CUDAPlace(0)
) )
static_params = dict() static_params = {}
with fluid.scope_guard(scope): with fluid.scope_guard(scope):
img = np.ones([2, 1], np.float32) img = np.ones([2, 1], np.float32)
noise = np.ones([2, 2], np.float32) noise = np.ones([2, 2], np.float32)
...@@ -142,7 +142,7 @@ class TestDygraphGAN(unittest.TestCase): ...@@ -142,7 +142,7 @@ class TestDygraphGAN(unittest.TestCase):
scope.find_var(param.name).get_tensor() scope.find_var(param.name).get_tensor()
) )
dy_params = dict() dy_params = {}
with fluid.dygraph.guard(): with fluid.dygraph.guard():
paddle.seed(1) paddle.seed(1)
paddle.framework.random._manual_program_seed(1) paddle.framework.random._manual_program_seed(1)
...@@ -197,7 +197,7 @@ class TestDygraphGAN(unittest.TestCase): ...@@ -197,7 +197,7 @@ class TestDygraphGAN(unittest.TestCase):
dy_g_loss = g_loss.numpy() dy_g_loss = g_loss.numpy()
dy_d_loss = d_loss.numpy() dy_d_loss = d_loss.numpy()
dy_params2 = dict() dy_params2 = {}
with fluid.dygraph.guard(): with fluid.dygraph.guard():
fluid.set_flags({'FLAGS_sort_sum_gradient': True}) fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.seed(1) paddle.seed(1)
......
...@@ -119,8 +119,8 @@ class TestDygraphSimpleNet(unittest.TestCase): ...@@ -119,8 +119,8 @@ class TestDygraphSimpleNet(unittest.TestCase):
learning_rate=1e-3, learning_rate=1e-3,
parameter_list=simple_net.parameters(), parameter_list=simple_net.parameters(),
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
helper = DyGraphProgramDescTracerTestHelper(self) helper = DyGraphProgramDescTracerTestHelper(self)
...@@ -171,9 +171,9 @@ class TestDygraphSimpleNet(unittest.TestCase): ...@@ -171,9 +171,9 @@ class TestDygraphSimpleNet(unittest.TestCase):
y.desc.set_need_check_feed(False) y.desc.set_need_check_feed(False)
static_loss = simple_net(x, y) static_loss = simple_net(x, y)
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
static_param_name_list = list() static_param_name_list = []
for param in simple_net.parameters(): for param in simple_net.parameters():
static_param_name_list.append(param.name) static_param_name_list.append(param.name)
......
...@@ -71,7 +71,7 @@ class TestImperativeNamedParameters(unittest.TestCase): ...@@ -71,7 +71,7 @@ class TestImperativeNamedParameters(unittest.TestCase):
model = paddle.nn.Sequential(fc1, fc2, custom) model = paddle.nn.Sequential(fc1, fc2, custom)
named_parameters = list(model.named_parameters()) named_parameters = list(model.named_parameters())
expected_named_parameters = list() expected_named_parameters = []
for prefix, layer in model.named_sublayers(): for prefix, layer in model.named_sublayers():
for name, param in layer.named_parameters( for name, param in layer.named_parameters(
include_sublayers=False include_sublayers=False
......
...@@ -268,8 +268,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -268,8 +268,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
sgd = SGDOptimizer( sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=ptb_model.parameters() learning_rate=1e-3, parameter_list=ptb_model.parameters()
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -347,9 +347,9 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -347,9 +347,9 @@ class TestDygraphPtbRnn(unittest.TestCase):
x, y, init_hidden, init_cell x, y, init_hidden, init_cell
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
static_param_name_list = list() static_param_name_list = []
for param in ptb_model.parameters(): for param in ptb_model.parameters():
static_param_name_list.append(param.name) static_param_name_list.append(param.name)
......
...@@ -59,8 +59,8 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): ...@@ -59,8 +59,8 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
sgd = SGDOptimizer( sgd = SGDOptimizer(
learning_rate=1e-3, parameter_list=ptb_model.parameters() learning_rate=1e-3, parameter_list=ptb_model.parameters()
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -135,9 +135,9 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase): ...@@ -135,9 +135,9 @@ class TestDygraphPtbRnnSortGradient(unittest.TestCase):
x, y, init_hidden, init_cell x, y, init_hidden, init_cell
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
static_param_name_list = list() static_param_name_list = []
for param in ptb_model.parameters(): for param in ptb_model.parameters():
static_param_name_list.append(param.name) static_param_name_list.append(param.name)
......
...@@ -282,8 +282,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -282,8 +282,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
adam = Adam( adam = Adam(
learning_rate=scheduler, parameters=ptb_model.parameters() learning_rate=scheduler, parameters=ptb_model.parameters()
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -385,8 +385,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -385,8 +385,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
adam = Adam( adam = Adam(
learning_rate=scheduler, parameters=ptb_model.parameters() learning_rate=scheduler, parameters=ptb_model.parameters()
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -507,8 +507,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -507,8 +507,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
adam = Adam( adam = Adam(
learning_rate=scheduler, parameters=ptb_model.parameters() learning_rate=scheduler, parameters=ptb_model.parameters()
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -625,8 +625,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -625,8 +625,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
adam = Adam( adam = Adam(
learning_rate=scheduler, parameters=ptb_model.parameters() learning_rate=scheduler, parameters=ptb_model.parameters()
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -741,8 +741,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -741,8 +741,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
beta2=0.6, beta2=0.6,
parameters=ptb_model.parameters(), parameters=ptb_model.parameters(),
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -838,8 +838,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -838,8 +838,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
beta2=0.6, beta2=0.6,
parameters=ptb_model.parameters(), parameters=ptb_model.parameters(),
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
...@@ -943,8 +943,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -943,8 +943,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
beta2=0.6, beta2=0.6,
parameters=ptb_model.parameters(), parameters=ptb_model.parameters(),
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
last_hidden = None last_hidden = None
last_cell = None last_cell = None
......
...@@ -130,8 +130,8 @@ class TestDygraphSimpleNet(unittest.TestCase): ...@@ -130,8 +130,8 @@ class TestDygraphSimpleNet(unittest.TestCase):
learning_rate=1e-3, learning_rate=1e-3,
parameter_list=simple_net.parameters(), parameter_list=simple_net.parameters(),
) )
dy_param_updated = dict() dy_param_updated = {}
dy_param_init = dict() dy_param_init = {}
dy_loss = None dy_loss = None
fluid.set_flags( fluid.set_flags(
...@@ -181,9 +181,9 @@ class TestDygraphSimpleNet(unittest.TestCase): ...@@ -181,9 +181,9 @@ class TestDygraphSimpleNet(unittest.TestCase):
y.desc.set_need_check_feed(False) y.desc.set_need_check_feed(False)
static_loss = simple_net(x, y) static_loss = simple_net(x, y)
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
static_param_name_list = list() static_param_name_list = []
for param in simple_net.parameters(): for param in simple_net.parameters():
static_param_name_list.append(param.name) static_param_name_list.append(param.name)
......
...@@ -23,7 +23,7 @@ from paddle.fluid import core ...@@ -23,7 +23,7 @@ from paddle.fluid import core
def set_child_signal_handler(self, child_pid): def set_child_signal_handler(self, child_pid):
core._set_process_pids(id(self), tuple([child_pid])) core._set_process_pids(id(self), (child_pid,))
current_handler = signal.getsignal(signal.SIGCHLD) current_handler = signal.getsignal(signal.SIGCHLD)
if not callable(current_handler): if not callable(current_handler):
current_handler = None current_handler = None
......
...@@ -209,7 +209,7 @@ def create_feed_dict_list(data, init=False): ...@@ -209,7 +209,7 @@ def create_feed_dict_list(data, init=False):
+ decoder_data_input_fields[:-1] + decoder_data_input_fields[:-1]
+ label_data_input_fields + label_data_input_fields
) )
feed_dict_list = dict() feed_dict_list = {}
for i in range(len(data_input_names)): for i in range(len(data_input_names)):
feed_dict_list[data_input_names[i]] = data[i] feed_dict_list[data_input_names[i]] = data[i]
return feed_dict_list return feed_dict_list
...@@ -605,7 +605,7 @@ class EncoderLayer(Layer): ...@@ -605,7 +605,7 @@ class EncoderLayer(Layer):
super().__init__() super().__init__()
self._preprocess_cmd = preprocess_cmd self._preprocess_cmd = preprocess_cmd
self._encoder_sublayers = list() self._encoder_sublayers = []
self._prepostprocess_dropout = prepostprocess_dropout self._prepostprocess_dropout = prepostprocess_dropout
self._n_layer = n_layer self._n_layer = n_layer
self._preprocess_layer = PrePostProcessLayer( self._preprocess_layer = PrePostProcessLayer(
...@@ -886,7 +886,7 @@ class DecoderLayer(Layer): ...@@ -886,7 +886,7 @@ class DecoderLayer(Layer):
self._pre_process_layer = PrePostProcessLayer( self._pre_process_layer = PrePostProcessLayer(
d_model, preprocess_cmd, 3 d_model, preprocess_cmd, 3
) )
self._decoder_sub_layers = list() self._decoder_sub_layers = []
self._n_layer = n_layer self._n_layer = n_layer
self._preprocess_cmd = preprocess_cmd self._preprocess_cmd = preprocess_cmd
self._prepostprocess_dropout = prepostprocess_dropout self._prepostprocess_dropout = prepostprocess_dropout
...@@ -1155,8 +1155,8 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): ...@@ -1155,8 +1155,8 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
optimizer = fluid.optimizer.SGD( optimizer = fluid.optimizer.SGD(
learning_rate=0.003, parameter_list=transformer.parameters() learning_rate=0.003, parameter_list=transformer.parameters()
) )
dy_param_init = dict() dy_param_init = {}
dy_param_updated = dict() dy_param_updated = {}
helper = DyGraphProgramDescTracerTestHelper(self) helper = DyGraphProgramDescTracerTestHelper(self)
program = None program = None
...@@ -1238,9 +1238,9 @@ class TestDygraphTransformerSortGradient(unittest.TestCase): ...@@ -1238,9 +1238,9 @@ class TestDygraphTransformerSortGradient(unittest.TestCase):
] ]
label = all_inputs[-2] label = all_inputs[-2]
weights = all_inputs[-1] weights = all_inputs[-1]
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
static_param_name_list = list() static_param_name_list = []
( (
static_sum_cost, static_sum_cost,
static_avg_cost, static_avg_cost,
......
...@@ -107,7 +107,7 @@ class TestIndexAddAPI(unittest.TestCase): ...@@ -107,7 +107,7 @@ class TestIndexAddAPI(unittest.TestCase):
self.check_backward = True self.check_backward = True
self.generate_input_data() self.generate_input_data()
self.index_shape = tuple([self.index_size]) self.index_shape = (self.index_size,)
self.rtol = 1e-5 self.rtol = 1e-5
self.atol = 1e-2 self.atol = 1e-2
......
...@@ -336,7 +336,7 @@ class TestSparseLambOp(unittest.TestCase): ...@@ -336,7 +336,7 @@ class TestSparseLambOp(unittest.TestCase):
scope = core.Scope() scope = core.Scope()
self.setup(scope, place) self.setup(scope, place)
op_args = dict() op_args = {}
for key, np_array in self.dense_inputs.items(): for key, np_array in self.dense_inputs.items():
var = scope.var(key).get_tensor() var = scope.var(key).get_tensor()
var.set(np_array, place) var.set(np_array, place)
......
...@@ -80,7 +80,7 @@ class LSTMCell(LayerMixin): ...@@ -80,7 +80,7 @@ class LSTMCell(LayerMixin):
self.hidden_size = hidden_size self.hidden_size = hidden_size
self.bias = bias self.bias = bias
self.dtype = np.float64 self.dtype = np.float64
self.parameters = dict() self.parameters = {}
self.weight_ih = weight.weight_ih self.weight_ih = weight.weight_ih
self.weight_hh = weight.weight_hh self.weight_hh = weight.weight_hh
self.parameters['weight_ih'] = self.weight_ih self.parameters['weight_ih'] = self.weight_ih
......
...@@ -122,12 +122,14 @@ class TestLUOp(OpTest): ...@@ -122,12 +122,14 @@ class TestLUOp(OpTest):
lshape = np.array(sL.shape) lshape = np.array(sL.shape)
ushape = np.array(sU.shape) ushape = np.array(sU.shape)
lpad = (len(sL.shape) - 2) * [(0, 0)] + list( lpad = (len(sL.shape) - 2) * [(0, 0)] + [
((0, (ashape - lshape)[-2]), (0, (ashape - lshape)[-1])) (0, (ashape - lshape)[-2]),
) (0, (ashape - lshape)[-1]),
upad = (len(sU.shape) - 2) * [(0, 0)] + list( ]
((0, (ashape - ushape)[-2]), (0, (ashape - ushape)[-1])) upad = (len(sU.shape) - 2) * [(0, 0)] + [
) (0, (ashape - ushape)[-2]),
(0, (ashape - ushape)[-1]),
]
NsL = np.pad(sL, lpad) NsL = np.pad(sL, lpad)
NsU = np.pad(sU, upad) NsU = np.pad(sU, upad)
...@@ -262,12 +264,14 @@ class TestLUAPI(unittest.TestCase): ...@@ -262,12 +264,14 @@ class TestLUAPI(unittest.TestCase):
lshape = np.array(sL.shape) lshape = np.array(sL.shape)
ushape = np.array(sU.shape) ushape = np.array(sU.shape)
lpad = (len(sL.shape) - 2) * [(0, 0)] + list( lpad = (len(sL.shape) - 2) * [(0, 0)] + [
((0, (ashape - lshape)[-2]), (0, (ashape - lshape)[-1])) (0, (ashape - lshape)[-2]),
) (0, (ashape - lshape)[-1]),
upad = (len(sU.shape) - 2) * [(0, 0)] + list( ]
((0, (ashape - ushape)[-2]), (0, (ashape - ushape)[-1])) upad = (len(sU.shape) - 2) * [(0, 0)] + [
) (0, (ashape - ushape)[-2]),
(0, (ashape - ushape)[-1]),
]
NsL = np.pad(sL, lpad) NsL = np.pad(sL, lpad)
NsU = np.pad(sU, upad) NsU = np.pad(sU, upad)
......
...@@ -69,8 +69,8 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase): ...@@ -69,8 +69,8 @@ class TestMaxMinAmaxAminAPI(unittest.TestCase):
self.np_grad[func] = grad self.np_grad[func] = grad
self.np_out = dict() self.np_out = {}
self.np_grad = dict() self.np_grad = {}
_cal_np_out_and_gradient('amax') _cal_np_out_and_gradient('amax')
_cal_np_out_and_gradient('amin') _cal_np_out_and_gradient('amin')
_cal_np_out_and_gradient('max') _cal_np_out_and_gradient('max')
......
...@@ -191,7 +191,7 @@ class TestSubsetDataset(unittest.TestCase): ...@@ -191,7 +191,7 @@ class TestSubsetDataset(unittest.TestCase):
label, (fluid.core.VarBase, fluid.core.eager.Tensor) label, (fluid.core.VarBase, fluid.core.eager.Tensor)
) )
elements_list = list() elements_list = []
for _, (input, label) in enumerate(dataloader()): for _, (input, label) in enumerate(dataloader()):
assert_basic(input, label) assert_basic(input, label)
elements_list.append(label) elements_list.append(label)
...@@ -200,7 +200,7 @@ class TestSubsetDataset(unittest.TestCase): ...@@ -200,7 +200,7 @@ class TestSubsetDataset(unittest.TestCase):
assert_basic(input, label) assert_basic(input, label)
elements_list.remove(label) elements_list.remove(label)
odd_list = list() odd_list = []
for _, (input, label) in enumerate(dataloader_odd()): for _, (input, label) in enumerate(dataloader_odd()):
assert_basic(input, label) assert_basic(input, label)
odd_list.append(label) odd_list.append(label)
......
...@@ -212,7 +212,7 @@ class TestOptimizer(unittest.TestCase): ...@@ -212,7 +212,7 @@ class TestOptimizer(unittest.TestCase):
with fluid.program_guard(main_program, init_program): with fluid.program_guard(main_program, init_program):
# reset optimizer._accumulators to avoid duplicate name in loop. # reset optimizer._accumulators to avoid duplicate name in loop.
self.optimizer._accumulators = defaultdict( self.optimizer._accumulators = defaultdict(
lambda: dict() lambda: {}
) )
test_net = self.NetClass( test_net = self.NetClass(
self.optimizer, param_lr, y_no_grad self.optimizer, param_lr, y_no_grad
......
...@@ -61,16 +61,16 @@ class TestInitParallelEnv(unittest.TestCase): ...@@ -61,16 +61,16 @@ class TestInitParallelEnv(unittest.TestCase):
class TestSpawnAssistMethod(unittest.TestCase): class TestSpawnAssistMethod(unittest.TestCase):
def test_nprocs_greater_than_device_num_error(self): def test_nprocs_greater_than_device_num_error(self):
with self.assertRaises(RuntimeError): with self.assertRaises(RuntimeError):
_get_subprocess_env_list(nprocs=100, options=dict()) _get_subprocess_env_list(nprocs=100, options={})
def test_selected_devices_error(self): def test_selected_devices_error(self):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
options = dict() options = {}
options['selected_devices'] = "100,101" options['selected_devices'] = "100,101"
_get_subprocess_env_list(nprocs=2, options=options) _get_subprocess_env_list(nprocs=2, options=options)
def test_get_correct_env(self): def test_get_correct_env(self):
options = dict() options = {}
options['print_config'] = True options['print_config'] = True
env_dict = _get_subprocess_env_list(nprocs=1, options=options)[0] env_dict = _get_subprocess_env_list(nprocs=1, options=options)[0]
self.assertEqual(env_dict['PADDLE_TRAINER_ID'], '0') self.assertEqual(env_dict['PADDLE_TRAINER_ID'], '0')
...@@ -78,12 +78,12 @@ class TestSpawnAssistMethod(unittest.TestCase): ...@@ -78,12 +78,12 @@ class TestSpawnAssistMethod(unittest.TestCase):
def test_nprocs_not_equal_to_selected_devices(self): def test_nprocs_not_equal_to_selected_devices(self):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
options = dict() options = {}
options['selected_devices'] = "100,101,102" options['selected_devices'] = "100,101,102"
_get_subprocess_env_list(nprocs=2, options=options) _get_subprocess_env_list(nprocs=2, options=options)
def test_options_valid_check(self): def test_options_valid_check(self):
options = dict() options = {}
options['selected_devices'] = "100,101,102" options['selected_devices'] = "100,101,102"
_options_valid_check(options) _options_valid_check(options)
......
...@@ -87,7 +87,7 @@ class TestStateDictConvert(unittest.TestCase): ...@@ -87,7 +87,7 @@ class TestStateDictConvert(unittest.TestCase):
class TestStateDictReturn(unittest.TestCase): class TestStateDictReturn(unittest.TestCase):
def test_missing_keys_and_unexpected_keys(self): def test_missing_keys_and_unexpected_keys(self):
model1 = MyModel2() model1 = MyModel2()
tmp_dict = dict() tmp_dict = {}
tmp_dict["unexpected_keys"] = paddle.to_tensor(1) tmp_dict["unexpected_keys"] = paddle.to_tensor(1)
missing_keys, unexpected_keys = model1.set_state_dict(tmp_dict) missing_keys, unexpected_keys = model1.set_state_dict(tmp_dict)
self.assertEqual(len(missing_keys), 2) self.assertEqual(len(missing_keys), 2)
......
...@@ -300,8 +300,8 @@ class TestSaveLoadBase(unittest.TestCase): ...@@ -300,8 +300,8 @@ class TestSaveLoadBase(unittest.TestCase):
x, y, init_hidden, init_cell x, y, init_hidden, init_cell
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -441,8 +441,8 @@ class TestSaveLoadPartial(unittest.TestCase): ...@@ -441,8 +441,8 @@ class TestSaveLoadPartial(unittest.TestCase):
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -575,8 +575,8 @@ class TestSaveLoadSetStateDict(unittest.TestCase): ...@@ -575,8 +575,8 @@ class TestSaveLoadSetStateDict(unittest.TestCase):
x, y, init_hidden, init_cell x, y, init_hidden, init_cell
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -714,8 +714,8 @@ class TestProgramStatePartial(unittest.TestCase): ...@@ -714,8 +714,8 @@ class TestProgramStatePartial(unittest.TestCase):
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -1042,8 +1042,8 @@ class TestLoadFromOldInterface(unittest.TestCase): ...@@ -1042,8 +1042,8 @@ class TestLoadFromOldInterface(unittest.TestCase):
test_clone_program = fluid.default_main_program().clone() test_clone_program = fluid.default_main_program().clone()
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -1184,8 +1184,8 @@ class TestLoadFromOldInterface(unittest.TestCase): ...@@ -1184,8 +1184,8 @@ class TestLoadFromOldInterface(unittest.TestCase):
test_clone_program = fluid.default_main_program().clone() test_clone_program = fluid.default_main_program().clone()
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -1326,8 +1326,8 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): ...@@ -1326,8 +1326,8 @@ class TestLoadFromOldInterfaceSingleFile(unittest.TestCase):
x, y, init_hidden, init_cell x, y, init_hidden, init_cell
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -1528,8 +1528,8 @@ class TestProgramStateOldSave(unittest.TestCase): ...@@ -1528,8 +1528,8 @@ class TestProgramStateOldSave(unittest.TestCase):
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
...@@ -1704,8 +1704,8 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase): ...@@ -1704,8 +1704,8 @@ class TestProgramStateOldSaveSingleModel(unittest.TestCase):
) )
sgd.minimize(static_loss) sgd.minimize(static_loss)
static_param_updated = dict() static_param_updated = {}
static_param_init = dict() static_param_init = {}
out = exe.run(framework.default_startup_program()) out = exe.run(framework.default_startup_program())
......
...@@ -19,7 +19,7 @@ from paddle.fluid.op import Operator ...@@ -19,7 +19,7 @@ from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None): def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None):
kwargs = dict() kwargs = {}
op_maker = core.op_proto_and_checker_maker op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName() op_role_attr_name = op_maker.kOpRoleAttrName()
......
...@@ -391,7 +391,7 @@ class TestSparseAdamOp(unittest.TestCase): ...@@ -391,7 +391,7 @@ class TestSparseAdamOp(unittest.TestCase):
scope = core.Scope() scope = core.Scope()
self.setup(scope, place, lazy_mode) self.setup(scope, place, lazy_mode)
op_args = dict() op_args = {}
op_args['lazy_mode'] = lazy_mode op_args['lazy_mode'] = lazy_mode
for key, np_array in self.dense_inputs.items(): for key, np_array in self.dense_inputs.items():
var = scope.var(key).get_tensor() var = scope.var(key).get_tensor()
......
...@@ -252,7 +252,7 @@ class APITestStaticFusedFFN(unittest.TestCase): ...@@ -252,7 +252,7 @@ class APITestStaticFusedFFN(unittest.TestCase):
dropout2_out = x + F.dropout(x=linear2_out, p=0.0, training=False) dropout2_out = x + F.dropout(x=linear2_out, p=0.0, training=False)
ln_out = F.layer_norm( ln_out = F.layer_norm(
dropout2_out, dropout2_out,
normalized_shape=list([d_model]), normalized_shape=[d_model],
weight=ln2_scale, weight=ln2_scale,
bias=ln2_bias, bias=ln2_bias,
) )
......
...@@ -90,7 +90,7 @@ def _load_state_dict_from_save_inference_model(model_path, config): ...@@ -90,7 +90,7 @@ def _load_state_dict_from_save_inference_model(model_path, config):
) )
# 3. construct state_dict # 3. construct state_dict
load_param_dict = dict() load_param_dict = {}
for var_name in persistable_var_dict: for var_name in persistable_var_dict:
load_param_dict[var_name] = persistable_var_dict[var_name].numpy() load_param_dict[var_name] = persistable_var_dict[var_name].numpy()
...@@ -100,7 +100,7 @@ def _load_state_dict_from_save_inference_model(model_path, config): ...@@ -100,7 +100,7 @@ def _load_state_dict_from_save_inference_model(model_path, config):
if os.path.exists(var_info_path): if os.path.exists(var_info_path):
with open(var_info_path, 'rb') as f: with open(var_info_path, 'rb') as f:
extra_var_info = pickle.load(f) extra_var_info = pickle.load(f)
structured_para_dict = dict() structured_para_dict = {}
for var_name in load_param_dict: for var_name in load_param_dict:
structured_name = extra_var_info[var_name].get( structured_name = extra_var_info[var_name].get(
'structured_name', None 'structured_name', None
...@@ -144,7 +144,7 @@ def _load_state_dict_from_save_params(model_path): ...@@ -144,7 +144,7 @@ def _load_state_dict_from_save_params(model_path):
load_var_list.append(new_var) load_var_list.append(new_var)
# 3. construct state_dict # 3. construct state_dict
load_param_dict = dict() load_param_dict = {}
for var in load_var_list: for var in load_var_list:
load_param_dict[var.name] = var.numpy() load_param_dict[var.name] = var.numpy()
...@@ -306,7 +306,7 @@ def _pickle_save(obj, f, protocol): ...@@ -306,7 +306,7 @@ def _pickle_save(obj, f, protocol):
"paddle do not support saving `paddle.nn.Layer` object." "paddle do not support saving `paddle.nn.Layer` object."
) )
dispatch_table_layer = dict() dispatch_table_layer = {}
def create_layer_dispatch_table(layer): def create_layer_dispatch_table(layer):
dispatch_table_layer[layer.__class__] = reduce_Layer dispatch_table_layer[layer.__class__] = reduce_Layer
......
...@@ -1062,14 +1062,14 @@ class WandbCallback(Callback): ...@@ -1062,14 +1062,14 @@ class WandbCallback(Callback):
"You want to use `wandb` which is not installed yet install it with `pip install wandb`", "You want to use `wandb` which is not installed yet install it with `pip install wandb`",
) )
self.wandb_args = dict( self.wandb_args = {
project=project, 'project': project,
name=name, 'name': name,
entity=entity, 'entity': entity,
dir=dir, 'dir': dir,
mode=mode, 'mode': mode,
job_type=job_type, 'job_type': job_type,
) }
self._run = None self._run = None
self.wandb_args.update(**kwargs) self.wandb_args.update(**kwargs)
...@@ -1121,7 +1121,7 @@ class WandbCallback(Callback): ...@@ -1121,7 +1121,7 @@ class WandbCallback(Callback):
metrics = getattr(self, '%s_metrics' % (mode)) metrics = getattr(self, '%s_metrics' % (mode))
current_step = getattr(self, '%s_step' % (mode)) current_step = getattr(self, '%s_step' % (mode))
_metrics = dict() _metrics = {}
if mode == 'train': if mode == 'train':
total_step = current_step total_step = current_step
......
...@@ -186,7 +186,7 @@ class DistributedStrategy: ...@@ -186,7 +186,7 @@ class DistributedStrategy:
self.debug_opt = opt_info self.debug_opt = opt_info
def get_debug_opt(self): def get_debug_opt(self):
opt_info = dict() opt_info = {}
if self.debug_opt is not None and isinstance(self.debug_opt, dict): if self.debug_opt is not None and isinstance(self.debug_opt, dict):
opt_info["dump_slot"] = bool(self.debug_opt.get("dump_slot", 0)) opt_info["dump_slot"] = bool(self.debug_opt.get("dump_slot", 0))
opt_info["dump_converter"] = str( opt_info["dump_converter"] = str(
......
...@@ -1190,8 +1190,8 @@ class CompileTimeStrategy: ...@@ -1190,8 +1190,8 @@ class CompileTimeStrategy:
sparse_pairs, dense_pairs = self.get_param_grads() sparse_pairs, dense_pairs = self.get_param_grads()
origin_for_sparse = [] origin_for_sparse = []
origin_for_dense = [] origin_for_dense = []
param_name_grad_name = dict() param_name_grad_name = {}
grad_name_to_param_name = dict() grad_name_to_param_name = {}
for param, grad in sparse_pairs: for param, grad in sparse_pairs:
param = vars_metatools.create_var_struct(param) param = vars_metatools.create_var_struct(param)
......
...@@ -179,7 +179,7 @@ def distributed_ops_pass(program, config, use_ps_gpu=False): ...@@ -179,7 +179,7 @@ def distributed_ops_pass(program, config, use_ps_gpu=False):
if input_indexes[i] == 1: if input_indexes[i] == 1:
move_ops.append((global_block.ops[i], i)) move_ops.append((global_block.ops[i], i))
for i, op in enumerate(move_ops): for i, op in enumerate(move_ops):
queue = list() queue = []
visited = set() visited = set()
queue.append(op[1]) queue.append(op[1])
visited.add(op[0]) visited.add(op[0])
......
...@@ -80,7 +80,7 @@ class DownpourServer(Server): ...@@ -80,7 +80,7 @@ class DownpourServer(Server):
% (table_id, pslib.PS_SPARSE_TABLE, table.type) % (table_id, pslib.PS_SPARSE_TABLE, table.type)
) )
if strategy is None: if strategy is None:
strategy = dict() strategy = {}
table = self._server.downpour_server_param.downpour_table_param.add() table = self._server.downpour_server_param.downpour_table_param.add()
table.table_id = table_id table.table_id = table_id
table.type = pslib.PS_SPARSE_TABLE table.type = pslib.PS_SPARSE_TABLE
...@@ -393,7 +393,7 @@ class DownpourServer(Server): ...@@ -393,7 +393,7 @@ class DownpourServer(Server):
) )
if strategy is None: if strategy is None:
strategy = dict() strategy = {}
table = self._server.downpour_server_param.downpour_table_param.add() table = self._server.downpour_server_param.downpour_table_param.add()
table.table_id = table_id table.table_id = table_id
support_dense_key_list = [ support_dense_key_list = [
...@@ -484,7 +484,7 @@ class DownpourServer(Server): ...@@ -484,7 +484,7 @@ class DownpourServer(Server):
% (table_id, pslib.PS_DENSE_TABLE, table.type) % (table_id, pslib.PS_DENSE_TABLE, table.type)
) )
if strategy is None: if strategy is None:
strategy = dict() strategy = {}
support_datanorm_key_list = [ support_datanorm_key_list = [
'datanorm_table_class', 'datanorm_table_class',
......
...@@ -125,7 +125,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -125,7 +125,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
inputs inputs
""" """
local_vars = program.current_block().vars local_vars = program.current_block().vars
inputs_dict = dict() inputs_dict = {}
for table_name in table_names: for table_name in table_names:
inputs_dict[table_name] = [] inputs_dict[table_name] = []
...@@ -148,7 +148,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -148,7 +148,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
outputs outputs
""" """
local_vars = program.current_block().vars local_vars = program.current_block().vars
outputs_dict = dict() outputs_dict = {}
for table_name in table_names: for table_name in table_names:
outputs_dict[table_name] = [] outputs_dict[table_name] = []
...@@ -162,7 +162,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -162,7 +162,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
def _find_distributed_lookup_table_grads(self, program, table_names): def _find_distributed_lookup_table_grads(self, program, table_names):
local_vars = program.current_block().vars local_vars = program.current_block().vars
grads_dict = dict() grads_dict = {}
for table_name in table_names: for table_name in table_names:
grads_dict[table_name] = [] grads_dict[table_name] = []
...@@ -281,7 +281,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -281,7 +281,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
% (len(params), len(grads)) % (len(params), len(grads))
) )
pname2grad = dict() pname2grad = {}
for i in range(len(params)): for i in range(len(params)):
pname = params[i].name pname = params[i].name
gname = grads[i].name gname = grads[i].name
...@@ -316,8 +316,8 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -316,8 +316,8 @@ class DistributedAdam(DistributedOptimizerImplBase):
lists_grads = [[] for i in range(len(cond_params.keys()))] lists_grads = [[] for i in range(len(cond_params.keys()))]
key_id = 0 key_id = 0
name2key = dict() name2key = {}
cond2denseid = dict() cond2denseid = {}
for key, value in cond_params.items(): for key, value in cond_params.items():
cond2denseid[key] = dense_table_id cond2denseid[key] = dense_table_id
dense_tables.append(dense_table_id) dense_tables.append(dense_table_id)
...@@ -341,7 +341,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -341,7 +341,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
) )
def _gen_distributed_emb_to_size_dict(self, program): def _gen_distributed_emb_to_size_dict(self, program):
d_size = dict() d_size = {}
local_vars = program.current_block().vars local_vars = program.current_block().vars
for op in program.global_block().ops: for op in program.global_block().ops:
...@@ -363,7 +363,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -363,7 +363,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
self, strategy, table_name, emb_to_size self, strategy, table_name, emb_to_size
): ):
if strategy.get(table_name) is None: if strategy.get(table_name) is None:
strategy[table_name] = dict() strategy[table_name] = {}
st = strategy[table_name] st = strategy[table_name]
accessor = "DownpourCtrAccessor" accessor = "DownpourCtrAccessor"
...@@ -521,7 +521,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -521,7 +521,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
# has condition_block op means multi-task # has condition_block op means multi-task
flag_multi_task = self._has_conditional_block(loss) flag_multi_task = self._has_conditional_block(loss)
if flag_multi_task: if flag_multi_task:
self._cond_params = dict() self._cond_params = {}
self._other_params = [] self._other_params = []
now_program = loss.block.program now_program = loss.block.program
root_block = now_program.block(0) root_block = now_program.block(0)
...@@ -530,8 +530,8 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -530,8 +530,8 @@ class DistributedAdam(DistributedOptimizerImplBase):
all_params.append(par.name) all_params.append(par.name)
ops_ = root_block.ops ops_ = root_block.ops
fill_value_dict = dict() fill_value_dict = {}
equal_fill_dict = dict() equal_fill_dict = {}
for op in ops_: for op in ops_:
# conditional_block op must has fill_constant and equal op # conditional_block op must has fill_constant and equal op
if op.type == 'fill_constant': if op.type == 'fill_constant':
...@@ -635,7 +635,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ...@@ -635,7 +635,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
print("emb_to_table ", emb_to_table) print("emb_to_table ", emb_to_table)
raise ValueError("key error: %s" % key) raise ValueError("key error: %s" % key)
if strategy.get(key) is None: if strategy.get(key) is None:
strategy[key] = dict() strategy[key] = {}
st = strategy[key] st = strategy[key]
accessor = None accessor = None
......
...@@ -336,7 +336,7 @@ class MoELayer(nn.Layer): ...@@ -336,7 +336,7 @@ class MoELayer(nn.Layer):
self.recompute_ctx = recompute_ctx self.recompute_ctx = recompute_ctx
if gate is None: if gate is None:
gate = dict() gate = {}
assert isinstance( assert isinstance(
gate, (dict, BaseGate) gate, (dict, BaseGate)
......
...@@ -173,7 +173,7 @@ def _state_dict_groups(state_dict, max_size): ...@@ -173,7 +173,7 @@ def _state_dict_groups(state_dict, max_size):
max_size = max(max_size, max_tensor_size) max_size = max(max_size, max_tensor_size)
logger.debug(f"max tensor size: {max_size}") logger.debug(f"max tensor size: {max_size}")
state_group = dict() state_group = {}
k_list = list(state_dict.keys()) k_list = list(state_dict.keys())
index = 0 index = 0
bits = 0 bits = 0
...@@ -185,7 +185,7 @@ def _state_dict_groups(state_dict, max_size): ...@@ -185,7 +185,7 @@ def _state_dict_groups(state_dict, max_size):
) )
if bits + bsize >= max_size: if bits + bsize >= max_size:
yield state_group yield state_group
state_group = dict() state_group = {}
bits = 0 bits = 0
state_group[k_list[index]] = state_dict[k_list[index]] state_group[k_list[index]] = state_dict[k_list[index]]
...@@ -317,7 +317,7 @@ def _grouped_gather_data_dict(state_data_dict, dst, group, max_size): ...@@ -317,7 +317,7 @@ def _grouped_gather_data_dict(state_data_dict, dst, group, max_size):
) )
total = 0 total = 0
output_state = dict() output_state = {}
logger.info("start all gather ...") logger.info("start all gather ...")
# gather all state_dict by groups # gather all state_dict by groups
......
...@@ -333,7 +333,7 @@ def _name_mapping_dist2single(state_dict, pp_group): ...@@ -333,7 +333,7 @@ def _name_mapping_dist2single(state_dict, pp_group):
def _get_wrapped_dist_state_dict(dist_state_dict): def _get_wrapped_dist_state_dict(dist_state_dict):
wrapped_state_dict = dict() wrapped_state_dict = {}
if dist.get_world_size() <= 1: if dist.get_world_size() <= 1:
for _, v in dist_state_dict.items(): for _, v in dist_state_dict.items():
wrapped_state_dict[v.name] = v wrapped_state_dict[v.name] = v
......
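For reference, a minimal sketch (not taken from the diff itself; the variable names here are hypothetical) of the rewrite pattern that the flake8-comprehensions rules C408, C409 and C410 enforce, which is what the hunks above apply throughout the test and distributed-training code:

# C408: unnecessary dict()/list() call -> use the literal
params = dict()                  # before
params = {}                      # after
names = list()                   # before
names = []                       # after

# C409: unnecessary list passed to tuple() -> use a tuple literal
shape = tuple([size])            # before
shape = (size,)                  # after

# C410: unnecessary list passed to list() -> drop the outer call
pads = list([(0, 1), (0, 2)])    # before
pads = [(0, 1), (0, 2)]          # after

The literal forms are equivalent but avoid a name lookup and an extra call, and they are what ruff's autofix produces for these rules.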