Unverified commit 9fa98349, authored by Infinity_lee, committed by GitHub

[CodeStyle][C405] Unnecessary <list/tuple> literal - rewrite as a set literal (#51972)

Parent 7aa7fc49
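The C405 check comes from the flake8-comprehensions rules that ruff implements: it flags set() calls that merely wrap a list or tuple literal. A minimal sketch of the rewrite applied throughout this commit; the names below are illustrative only, not taken from the Paddle sources:

# Before: C405 flags a list literal wrapped in set()
ops = set(["add_grad", "matmul_grad"])   # flagged by C405
empty = set([])                          # also flagged

# After: use a set literal; the empty case stays set(),
# because {} would create a dict, not a set
ops = {"add_grad", "matmul_grad"}
empty = set()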
@@ -19,57 +19,55 @@ import yaml
 ####################
 # Global Variables #
 ####################
-ops_to_fill_zero_for_empty_grads = set(
-[
+ops_to_fill_zero_for_empty_grads = {
 "split_grad",
 "split_with_num_grad",
 "rnn_grad",
 "matmul_double_grad",
 "matmul_triple_grad",
 "sigmoid_double_grad",
 "sigmoid_triple_grad",
 "add_double_grad",
 "add_triple_grad",
 "multiply_grad",
 "multiply_double_grad",
 "multiply_triple_grad",
 "conv2d_grad_grad",
 "conv2d_transpose_double_grad",
 "batch_norm_double_grad",
 "tanh_grad",
 "tanh_double_grad",
 "tanh_triple_grad",
 "sin_double_grad",
 "sin_triple_grad",
 "cos_double_grad",
 "cos_triple_grad",
 "subtract_double_grad",
 "divide_double_grad",
 "log_double_grad",
 "elu_double_grad",
 "leaky_relu_double_grad",
 "sqrt_double_grad",
 "rsqrt_double_grad",
 "square_double_grad",
 "celu_double_grad",
 "pad_double_grad",
 "pad3d_double_grad",
 "squeeze_double_grad",
 "unsqueeze_double_grad",
 "instance_norm_double_grad",
 "conv3d_double_grad",
 "depthwise_conv2d_grad_grad",
 "concat_double_grad",
 "expand_grad",
 "argsort_grad",
 "eigh_grad",
 "add_grad",
 "subtract_grad",
 "multiply_grad",
 "divide_grad",
 "matmul_grad",
-]
-)
+}
 # For API dispatch used at python-level
 # { op_name : [arg_name, ...] }
...
@@ -48,7 +48,7 @@ from codegen_utils import (
 # But because there is no check in old dygraph mode, in order to
 # keeping the code compatible, here we also skip inplace check in new dygraph temporarily,
 # and this will be fixed in the futrue.
-inplace_check_blacklist = set(["assign_out_"])
+inplace_check_blacklist = {"assign_out_"}
 # Black Ops list that's NO NEED to apply code generation
 black_ops_list = [
...
@@ -26,7 +26,7 @@ from codegen_utils import (
 #########################
 # Global Configurations #
 #########################
-skipped_forward_api_names = set([])
+skipped_forward_api_names = set()
 def SkipAPIGeneration(forward_api_name):
...
@@ -36,6 +36,9 @@ select = [
 "C400",
 "C401",
 "C402",
+"C403",
+"C404",
+"C405",
 "C408",
 "C409",
 "C410",
...
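The lint-configuration hunk above also adds C403 and C404 to the ruff select list alongside C405. As a hedged illustration of what those two sibling rules flag (the variables here are made up for the example, not taken from this diff):

grads = ["x@GRAD", "y@GRAD", "x@GRAD"]

# C403: unnecessary list comprehension passed to set()
unique = set([g.upper() for g in grads])      # flagged
unique = {g.upper() for g in grads}           # set comprehension instead

# C404: unnecessary list comprehension passed to dict()
lengths = dict([(g, len(g)) for g in grads])  # flagged
lengths = {g: len(g) for g in grads}          # dict comprehension instead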
@@ -482,7 +482,7 @@ class Converter:
 split_indices_list = partition_index
 split_indices_list = list(
 map(
-lambda x, y: list(set(x) - set([y]) - set([0])),
+lambda x, y: list(set(x) - {y} - {0}),
 split_indices_list,
 complete_shape,
 )
...
@@ -119,7 +119,7 @@ class ShardingStageAlgorithm(AlgorithmBase):
 stage_range = self._config.sharding.get("tuning_range", None)
 if stage_range:
 assert set(stage_range).issubset(
-set([0, 1, 2, 3])
+{0, 1, 2, 3}
 ), "Sharding Stage should belong into range within 0 - 3 but got {}.".format(
 stage_range
 )
...
@@ -1172,7 +1172,7 @@ def _get_split_indices(
 split_indices_list = partition_index
 split_indices_list = list(
 map(
-lambda x, y: list(set(x) - set([y]) - set([0])),
+lambda x, y: list(set(x) - {y} - {0}),
 split_indices_list,
 complete_shape,
 )
...
@@ -303,7 +303,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
 vars_metatools,
 )
-processed_var_names = set(["@EMPTY@"])
+processed_var_names = {"@EMPTY@"}
 param_memory_size = 0
 for varname in program.global_block().vars:
 var = program.global_block().vars[varname]
...
@@ -208,7 +208,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
 return False
 free = get_sys_free_mem()
-processed_var_names = set(["@EMPTY@"])
+processed_var_names = {"@EMPTY@"}
 param_memory_size = 0
 for varname in program.global_block().vars:
 var = program.global_block().vars[varname]
...
@@ -27,7 +27,7 @@ class Shard:
 def __init__(
 self,
 ):
-self.global_params = set([])
+self.global_params = set()
 self.worker_idx = -1
 self.worker_num = -1
 self.global_param2device = {}
@@ -96,8 +96,8 @@ class Shard:
 return -1
 def find_broadcast_params(self, block):
-broadcast_vars = set([])
-fp16_params = set([])
+broadcast_vars = set()
+fp16_params = set()
 fp16_to_fp32 = {}
 param_usage = {x: 0 for x in self.global_params}
...
@@ -981,7 +981,7 @@ def add_sync_comm(program, sharding_ring_id):
 assert sharding_ring_id >= 0, "sharding_ring_id should larger than zero"
 block = program.global_block()
-not_sync_vars = set([])
+not_sync_vars = set()
 for op in block.ops:
 if op.type in ["c_broadcast", "c_allreduce"]:
 for input_name in op.desc.input_arg_names():
...
@@ -78,8 +78,8 @@ class ShardingOptimizer(MetaOptimizerBase):
 self._startup_program = None
 self._segments = []
 # params and fp16 params is for broadcast
-self._params = set([])
-self._broadcast_vars = set([])
+self._params = set()
+self._broadcast_vars = set()
 # reduced grads to param name
 self._reduced_grads_to_param = {}
 self._shard = Shard()
...
@@ -1831,8 +1831,8 @@ class ShardingInfo:
 # and sharding should only broadcast the casted fp16 param
 # instead of the origin fp32 version param.
 def get_broadcast_vars_and_param_usage(self, block):
-broadcast_vars = set([])
-fp16_params = set([])
+broadcast_vars = set()
+fp16_params = set()
 fp16_to_fp32 = {}
 param_usage = {x: 0 for x in self.param_names}
...
@@ -123,21 +123,21 @@ class TestDistPNormDP(TestDistPNorm):
 assert op_dist_attr.impl_type == "p_norm"
 if op.type in ["p_norm", "p_norm_grad"]:
 for input_attr in op_dist_attr.inputs_dist_attrs.values():
-assert set(input_attr.dims_mapping) == set([-1])
+assert set(input_attr.dims_mapping) == {-1}
 for output_attr in op_dist_attr.outputs_dist_attrs.values():
-assert set(output_attr.dims_mapping) == set([-1])
+assert set(output_attr.dims_mapping) == {-1}
 if op.type == 'c_allgather':
 for input_attr in op_dist_attr.inputs_dist_attrs.values():
 assert input_attr.dims_mapping[0] == 0
-assert set(input_attr.dims_mapping[1:]) == set([-1])
+assert set(input_attr.dims_mapping[1:]) == {-1}
 for output_attr in op_dist_attr.outputs_dist_attrs.values():
-assert set(output_attr.dims_mapping) == set([-1])
+assert set(output_attr.dims_mapping) == {-1}
 if op.type == 'slice':
 for input_attr in op_dist_attr.inputs_dist_attrs.values():
-assert set(input_attr.dims_mapping) == set([-1])
+assert set(input_attr.dims_mapping) == {-1}
 for output_attr in op_dist_attr.outputs_dist_attrs.values():
 assert output_attr.dims_mapping[0] == 0
-assert set(output_attr.dims_mapping[1:]) == set([-1])
+assert set(output_attr.dims_mapping[1:]) == {-1}
 assert op_types == [
 "c_allgather",
 "p_norm",
...
@@ -322,39 +322,35 @@ class TestDistMPTraning(unittest.TestCase):
 )
 def test_sharding_adam(self):
-sharded_accumulators = set(
-[
+sharded_accumulators = {
 'linear_0.w_0_moment1_0',
 'linear_1.b_0_moment1_0',
 'linear_2.b_0_moment1_0',
 'embedding_0.w_0_moment1_0',
 'linear_0.w_0_moment2_0',
 'linear_1.b_0_moment2_0',
 'linear_2.b_0_moment2_0',
 'embedding_0.w_0_moment2_0',
 'linear_0.w_0_beta1_pow_acc_0',
 'linear_1.b_0_beta1_pow_acc_0',
 'linear_2.b_0_beta1_pow_acc_0',
 'embedding_0.w_0_beta1_pow_acc_0',
 'linear_0.w_0_beta2_pow_acc_0',
 'linear_1.b_0_beta2_pow_acc_0',
 'linear_2.b_0_beta2_pow_acc_0',
 'embedding_0.w_0_beta2_pow_acc_0',
-]
-)
+}
 self.sharding_model(
 Optimizer="adam", sharded_accumulators=sharded_accumulators
 )
 def test_sharding_momentum(self):
-sharded_accumulators = set(
-[
+sharded_accumulators = {
 'linear_6.w_0_velocity_0',
 'linear_7.b_0_velocity_0',
 'linear_8.b_0_velocity_0',
 'embedding_2.w_0_velocity_0',
-]
-)
+}
 self.sharding_model(
 Optimizer="Momentum", sharded_accumulators=sharded_accumulators
 )
...
@@ -42,17 +42,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertIn('@BroadCast', ''.join(vars))
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 "fc_1.b_0",
 "fc_2.b_0",
 "fc_2.w_0",
 "fc_1.b_0_velocity_0",
 "fc_2.b_0_velocity_0",
 "fc_2.w_0_velocity_0",
 "learning_rate_0",
-]
-),
+},
 )
 self.assertEqual(
@@ -123,20 +121,18 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertIn('check_finite_and_unscale', ops)
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 "fc_1.b_0",
 "fc_2.b_0",
 "fc_2.w_0",
 "fc_1.b_0_velocity_0",
 "fc_2.b_0_velocity_0",
 "fc_2.w_0_velocity_0",
 "learning_rate_0",
 "loss_scaling_0",
 "num_bad_steps_0",
 "num_good_steps_0",
-]
-),
+},
 )
 self.assertEqual(
@@ -232,17 +228,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertIn('subprog', ''.join(vars))
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 "fc_1.b_0",
 "fc_2.b_0",
 "fc_2.w_0",
 "fc_1.b_0_velocity_0",
 "fc_2.b_0_velocity_0",
 "fc_2.w_0_velocity_0",
 "learning_rate_0",
-]
-),
+},
 )
 self.assertEqual(
@@ -322,20 +316,18 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 "fc_1.b_0",
 "fc_2.b_0",
 "fc_2.w_0",
 "fc_1.b_0_velocity_0",
 "fc_2.b_0_velocity_0",
 "fc_2.w_0_velocity_0",
 "learning_rate_0",
 "loss_scaling_0",
 "num_bad_steps_0",
 "num_good_steps_0",
-]
-),
+},
 )
 self.assertEqual(
 ops,
@@ -448,23 +440,21 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 'fc_2.b_0',
 'num_good_steps_0',
 'fc_2.w_0',
 'loss_scaling_0',
 'num_bad_steps_0',
 'fc_2.w_0_velocity_0',
 'fc_2.w_0.asp_mask',
 'learning_rate_0',
 'fc_1.b_0',
 'fc_1.w_0.asp_mask',
 'fc_0.w_0.asp_mask',
 'fc_1.b_0_velocity_0',
 'fc_2.b_0_velocity_0',
-]
-),
+},
 )
 self.assertEqual(
 ops,
@@ -563,17 +553,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertIn('@BroadCast', ''.join(vars))
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 "fc_1.b_0",
 "fc_2.b_0",
 "fc_2.w_0",
 "fc_1.b_0_velocity_0",
 "fc_2.b_0_velocity_0",
 "fc_2.w_0_velocity_0",
 "learning_rate_0",
-]
-),
+},
 )
 self.assertEqual(
@@ -650,17 +638,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
 self.assertIn('@BroadCast', ''.join(vars))
 self.assertEqual(
 set(parameters),
-set(
-[
+{
 "fc_1.b_0",
 "fc_2.b_0",
 "fc_2.w_0",
 "fc_1.b_0_velocity_0",
 "fc_2.b_0_velocity_0",
 "fc_2.w_0_velocity_0",
 "learning_rate_0",
-]
-),
+},
 )
 self.assertEqual(
...
@@ -238,12 +238,12 @@ class TestNameVisitor(unittest.TestCase):
 for_loop_dufunc_with_listcomp,
 ]
 self.loop_var_names = [
-set(["i", "x"]),
-set(["i", "ret", "max_len"]),
-set(["i", "x"]),
-set(["j", "array", "res", "x"]),
+{"i", "x"},
+{"i", "ret", "max_len"},
+{"i", "x"},
+{"j", "array", "res", "x"},
 ]
-self.create_var_names = [set(), set(["ret"]), set(), set(["res", "x"])]
+self.create_var_names = [set(), {"ret"}, set(), {"res", "x"}]
 self.nested_for_loop_func = nested_for_loop_dyfunc
@@ -269,11 +269,11 @@ class TestNameVisitor(unittest.TestCase):
 name_visitor = NameVisitor(gast_root)
 self.loop_var_names = [
-set(["j", "two"]),
-set(["i", "three", "b"]),
-set(["i"]),
+{"j", "two"},
+{"i", "three", "b"},
+{"i"},
 ]
-self.create_var_names = [set(), set(["b"]), set()]
+self.create_var_names = [set(), {"b"}, set()]
 i = 0
 for node in gast.walk(gast_root):
...
@@ -46,7 +46,7 @@ class InferencePassTest(unittest.TestCase):
 random.seed(1)
 def _get_place(self):
-return set([False, core.is_compiled_with_cuda()])
+return {False, core.is_compiled_with_cuda()}
 def _save_models(
 self, dirname, feeded_var_names, target_vars, executor, program, scope
...
@@ -53,7 +53,7 @@ class TestConvertToMixedPrecision(unittest.TestCase):
 PrecisionType.Bfloat16,
 ]
 keep_io_types_options = [True, False, False, True]
-black_list_options = [set(), set(), set(['conv2d']), set()]
+black_list_options = [set(), set(), {'conv2d'}, set()]
 test_configs = zip(
 mixed_precision_options, keep_io_types_options, black_list_options
...
@@ -206,7 +206,7 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op):
 core.CPUPlace(),
 ["Input"],
 "Output",
-set(['Filter']),
+{'Filter'},
 user_defined_grads=[dx],
 user_defined_grad_outputs=[convert_float_to_uint16(dout)],
 )
@@ -222,7 +222,7 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op):
 core.CPUPlace(),
 ["Filter"],
 "Output",
-set(['Input']),
+{'Input'},
 user_defined_grads=[dweights],
 user_defined_grad_outputs=[convert_float_to_uint16(dout)],
 )
...
@@ -79,7 +79,7 @@ class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
 )
 x_grad = x_grad / np.prod(self.outputs['Out'].shape)
 self.check_grad_with_place(
-core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[x_grad]
+core.CPUPlace(), {'X'}, 'Out', user_defined_grads=[x_grad]
 )
...
@@ -164,7 +164,7 @@ class OpTestUtils:
 if api_params == []:
 results.append(input_arguments)
 return results
-api_ignore_param_list = set(['name', 'dtype', 'out', 'output'])
+api_ignore_param_list = {'name', 'dtype', 'out', 'output'}
 idx_of_op_proto_arguments = 0
 for idx, arg_name in enumerate(api_params):
 if arg_name in api_ignore_param_list:
...
@@ -179,7 +179,7 @@ class TestSeqProject(OpTest):
 self.check_grad(
 ['PaddingData'],
 'Out',
-no_grad_set=set(['X', 'Filter']),
+no_grad_set={'X', 'Filter'},
 check_dygraph=False,
 )
@@ -198,7 +198,7 @@ class TestSeqProject(OpTest):
 ['X', 'Filter'],
 'Out',
 max_relative_error=0.05,
-no_grad_set=set(['PaddingData']),
+no_grad_set={'PaddingData'},
 check_dygraph=False,
 )
@@ -208,7 +208,7 @@ class TestSeqProject(OpTest):
 self.inputs_val_no_f,
 'Out',
 max_relative_error=0.05,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 check_dygraph=False,
 )
@@ -218,7 +218,7 @@ class TestSeqProject(OpTest):
 self.inputs_val_no_x,
 'Out',
 max_relative_error=0.05,
-no_grad_set=set(['X']),
+no_grad_set={'X'},
 check_dygraph=False,
 )
...
@@ -62,7 +62,7 @@ class TestAffineChannelOp(OpTest):
 self.check_grad(
 ['X'],
 'Out',
-no_grad_set=set(['Scale', 'Bias']),
+no_grad_set={'Scale', 'Bias'},
 check_dygraph=False,
 )
...
@@ -187,16 +187,14 @@ class TestBackward(unittest.TestCase):
 class SimpleNet(BackwardNet):
 def __init__(self):
 super().__init__()
-self.stop_gradient_grad_vars = set(
-[
+self.stop_gradient_grad_vars = {
 'x_no_grad@GRAD',
 'x2_no_grad@GRAD',
 'x3_no_grad@GRAD',
 'label_no_grad@GRAD',
-]
-)
+}
 self.no_grad_vars = set()
-self.params_names = set(['w2v', 'fc_predict.b_0', 'fc_w'])
+self.params_names = {'w2v', 'fc_predict.b_0', 'fc_w'}
 self.op_path = [
 'lookup_table_v2',
 'lookup_table_v2', # embedding
...
@@ -617,7 +617,7 @@ class TestBatchNormOpTraining(unittest.TestCase):
 class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining):
 def init_test_case(self):
 self.use_global_stats = False
-self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
 self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
@@ -641,7 +641,7 @@ class TestBatchNormOpTrainingCase2(TestBatchNormOpTraining):
 class TestBatchNormOpTrainingCase3(TestBatchNormOpTraining):
 def init_test_case(self):
 self.use_global_stats = False
-self.no_grad_set = set(['x@GRAD'])
+self.no_grad_set = {'x@GRAD'}
 self.fetch_list = ['y', 'mean', 'variance', 'scale@GRAD', 'bias@GRAD']
@@ -747,7 +747,7 @@ class TestBatchNormOpFreezeStatsAndScaleBiasTraining(
 ):
 def init_test_case(self):
 self.use_global_stats = True
-self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
 self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
...
@@ -183,14 +183,14 @@ def create_test_cudnn_fp16_class(parent, grad_check=True):
 place = core.CUDAPlace(0)
 if core.is_float16_supported(place) and grad_check:
 self.check_grad_with_place(
-place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+place, ['Input'], 'Output', no_grad_set={'Filter'}
 )
 def test_check_grad_no_input(self):
 place = core.CUDAPlace(0)
 if core.is_float16_supported(place) and grad_check:
 self.check_grad_with_place(
-place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+place, ['Filter'], 'Output', no_grad_set={'Input'}
 )
 cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16")
@@ -231,7 +231,7 @@ def create_test_cudnn_bf16_class(parent):
 place,
 ['Input'],
 'Output',
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 user_defined_grads=[numeric_grads],
 )
@@ -242,7 +242,7 @@ def create_test_cudnn_bf16_class(parent):
 place,
 ['Filter'],
 'Output',
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 user_defined_grads=[numeric_grads],
 )
@@ -307,14 +307,14 @@ def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True):
 place = core.CUDAPlace(0)
 if core.is_float16_supported(place) and grad_check:
 self.check_grad_with_place(
-place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+place, ['Input'], 'Output', no_grad_set={'Filter'}
 )
 def test_check_grad_no_input(self):
 place = core.CUDAPlace(0)
 if core.is_float16_supported(place) and grad_check:
 self.check_grad_with_place(
-place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+place, ['Filter'], 'Output', no_grad_set={'Input'}
 )
 def init_data_format(self):
@@ -506,7 +506,7 @@ class TestConv2DOp(OpTest):
 ['Input'],
 'Output',
 max_relative_error=0.02,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 check_dygraph=(not self.use_mkldnn),
 )
@@ -521,7 +521,7 @@ class TestConv2DOp(OpTest):
 place,
 ['Filter'],
 'Output',
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 check_dygraph=(not self.use_mkldnn),
 )
@@ -826,7 +826,7 @@ class TestConv2DOp_v2(OpTest):
 ['Input'],
 'Output',
 max_relative_error=0.02,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 check_dygraph=(not self.use_mkldnn),
 )
@@ -839,7 +839,7 @@ class TestConv2DOp_v2(OpTest):
 place,
 ['Filter'],
 'Output',
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 check_dygraph=(not self.use_mkldnn),
 )
...
@@ -228,24 +228,20 @@ class TestConv2DTransposeOp(OpTest):
 ['Filter'],
 'Output',
 max_relative_error=0.02,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 )
 else:
-self.check_grad(
-['Filter'], 'Output', no_grad_set=set(['Input'])
-)
+self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})
 def test_check_grad_no_filter(self):
 if self.need_check_grad:
 if self.use_cudnn:
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
-place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+place, ['Input'], 'Output', no_grad_set={'Filter'}
 )
 else:
-self.check_grad(
-['Input'], 'Output', no_grad_set=set(['Filter'])
-)
+self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})
 def test_check_grad(self):
 if self.need_check_grad:
@@ -253,13 +249,13 @@ class TestConv2DTransposeOp(OpTest):
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
 place,
-set(['Input', 'Filter']),
+{'Input', 'Filter'},
 'Output',
 max_relative_error=0.02,
 )
 else:
 self.check_grad(
-set(['Input', 'Filter']), 'Output', max_relative_error=0.02
+{'Input', 'Filter'}, 'Output', max_relative_error=0.02
 )
 def init_test_case(self):
...
@@ -380,7 +380,7 @@ class TestConv3DOp(OpTest):
 ['Input'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 check_dygraph=(not self.use_mkldnn),
 )
@@ -394,7 +394,7 @@ class TestConv3DOp(OpTest):
 ['Filter'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 check_dygraph=(not self.use_mkldnn),
 )
@@ -694,7 +694,7 @@ class TestConv3DOp_2(OpTest):
 ['Input'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 )
 def test_check_grad_no_input(self):
@@ -706,7 +706,7 @@ class TestConv3DOp_2(OpTest):
 ['Filter'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 )
 def init_test_case(self):
...
@@ -206,13 +206,13 @@ class TestConv3DTransposeOp(OpTest):
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
 place,
-set(['Input', 'Filter']),
+{'Input', 'Filter'},
 'Output',
 max_relative_error=0.03,
 )
 else:
 self.check_grad(
-set(['Input', 'Filter']), 'Output', max_relative_error=0.03
+{'Input', 'Filter'}, 'Output', max_relative_error=0.03
 )
 def test_check_grad_no_filter(self):
@@ -223,14 +223,14 @@ class TestConv3DTransposeOp(OpTest):
 ['Input'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 )
 elif self.check_no_filter:
 self.check_grad(
 ['Input'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 )
 def test_check_grad_no_input(self):
@@ -241,14 +241,14 @@ class TestConv3DTransposeOp(OpTest):
 ['Filter'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 )
 elif self.check_no_input:
 self.check_grad(
 ['Filter'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 )
 def init_test_case(self):
...
@@ -276,7 +276,7 @@ class TestDataNormOp(OpTest):
 test check backward, check grad
 """
 # NODE(yjjiang11): This op will be deprecated.
-self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)
 class TestDataNormOpWithEnableScaleAndShift(OpTest):
@@ -340,7 +340,7 @@ class TestDataNormOpWithEnableScaleAndShift(OpTest):
 test check backward, check grad
 """
 # NODE(yjjiang11): This op will be deprecated.
-self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)
 class TestDataNormOpWithoutEnableScaleAndShift(OpTest):
@@ -399,7 +399,7 @@ class TestDataNormOpWithoutEnableScaleAndShift(OpTest):
 test check backward, check grad
 """
 # NODE(yjjiang11): This op will be deprecated.
-self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)
 class TestDataNormOpWithEnableScaleAndShift_1(OpTest):
@@ -463,7 +463,7 @@ class TestDataNormOpWithEnableScaleAndShift_1(OpTest):
 test check backward, check grad
 """
 # NODE(yjjiang11): This op will be deprecated.
-self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)
 class TestDataNormOpWithSlotDim(OpTest):
@@ -521,7 +521,7 @@ class TestDataNormOpWithSlotDim(OpTest):
 test check backward, check grad
 """
 # NODE(yjjiang11): This op will be deprecated.
-self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)
 class TestDataNormOpErrorr(unittest.TestCase):
...
@@ -202,7 +202,7 @@ class TestModulatedDeformableConvOp(OpTest):
 ['Input', 'Offset'],
 'Output',
 max_relative_error=0.1,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 )
 def init_test_case(self):
...
@@ -73,9 +73,7 @@ class TestFilterByInstagOp(OpTest):
 self.check_output()
 def test_check_grad(self):
-self.check_grad(
-['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-)
+self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})
 """This is Test Case 2"""
@@ -119,9 +117,7 @@ class TestFilterByInstagOp2(OpTest):
 self.check_output()
 def test_check_grad(self):
-self.check_grad(
-['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-)
+self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})
 """This is Test Case 3"""
@@ -162,9 +158,7 @@ class TestFilterByInstagOp3(OpTest):
 self.check_output()
 def test_check_grad(self):
-self.check_grad(
-['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-)
+self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})
 """This is Test Case 4"""
@@ -204,9 +198,7 @@ class TestFilterByInstagOp4(OpTest):
 self.check_output()
 def test_check_grad(self):
-self.check_grad(
-['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-)
+self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})
 class TestFilterByInstagOp6(OpTest):
...
@@ -126,7 +126,7 @@ class TestGroupNormOp(OpTest):
 self.op = create_op(
 self.scope, self.op_type, op_inputs, op_outputs, op_attrs
 )
-inputs_to_check = set(['X', 'Scale', 'Bias'])
+inputs_to_check = {'X', 'Scale', 'Bias'}
 output_names = 'Y'
 cpu_grads = self._get_gradient(
 inputs_to_check, place, output_names, None
@@ -148,12 +148,12 @@ class TestGroupNormOp(OpTest):
 return
 place = core.CPUPlace()
-self.check_grad_with_place(place, set(['X', 'Scale', 'Bias']), 'Y')
+self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
 if core.is_compiled_with_cuda():
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
 place,
-set(['X', 'Scale', 'Bias']),
+{'X', 'Scale', 'Bias'},
 'Y',
 )
@@ -187,7 +187,7 @@ class TestGroupNormFP16OP(TestGroupNormOp):
 return
 place = core.CUDAPlace(0)
-self.check_grad_with_place(place, set(['X', 'Scale', 'Bias']), 'Y')
+self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
 def init_test_case(self):
 self.dtype = np.float16
@@ -250,7 +250,7 @@ class TestGroupNormBF16Op(OpTest):
 return
 place = core.CUDAPlace(0)
-self.check_grad_with_place(place, set(['X', 'Scale', 'Bias']), 'Y')
+self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
 def init_test_case(self):
 pass
...
@@ -48,13 +48,11 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase):
 learning_rate=1e-3, parameter_list=layer.parameters()
 )
-expected_persistable_vars = set(
-[
+expected_persistable_vars = {
 layer._linear.weight.name,
 layer._linear.bias.name,
 layer._offset.name,
-]
-)
+}
 for _ in range(10):
 in_x = fluid.dygraph.to_variable(
...
@@ -60,7 +60,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
 core.infer_no_need_buffer_slots(
 op.type, inputs, outputs, attrs
 ),
-set([]),
+set(),
 )
 elif idx == 1:
 # fill constant op
@@ -68,7 +68,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
 core.infer_no_need_buffer_slots(
 op.type, inputs, outputs, attrs
 ),
-set([]),
+set(),
 )
 else:
 # elementwise_add_grad op
@@ -76,7 +76,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
 core.infer_no_need_buffer_slots(
 op.type, inputs, outputs, attrs
 ),
-set(['Y', 'X']),
+{'Y', 'X'},
 )
...
@@ -221,14 +221,14 @@ class TestInstanceNormOpTraining(unittest.TestCase):
 class TestInstanceNormOpTrainingCase1(TestInstanceNormOpTraining):
 def init_test_case(self):
 self.shape = [2, 3, 4, 5]
-self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
 self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']
 class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining):
 def init_test_case(self):
 self.shape = [20, 50, 4, 5]
-self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
 self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']
...
@@ -534,7 +534,7 @@ class TestCUDNNLstmOp(OpTest):
 for var_name in var_name_list:
 self.check_grad_with_place(
 place,
-set(['Input', var_name, 'InitH', 'InitC']),
+{'Input', var_name, 'InitH', 'InitC'},
 ['Out', 'LastH', 'LastC'],
 )
...
@@ -414,7 +414,7 @@ def create_test_bf16_class(parent, atol=0.01):
 place,
 ['X'],
 'Out',
-no_grad_set=set(['Y']),
+no_grad_set={'Y'},
 user_defined_grads=[numeric_grads],
 )
@@ -425,7 +425,7 @@ def create_test_bf16_class(parent, atol=0.01):
 place,
 ['Y'],
 'Out',
-no_grad_set=set(['X']),
+no_grad_set={'X'},
 user_defined_grads=[numeric_grads],
 )
...
@@ -54,7 +54,7 @@ class TestMultiplexOp(OpTest):
 self.check_grad(['x2', 'x3', 'x4'], 'Out', no_grad_set=set('x1'))
 def test_check_grad_ignore_x1_x2(self):
-self.check_grad(['x3', 'x4'], 'Out', no_grad_set=set(['x1', 'x2']))
+self.check_grad(['x3', 'x4'], 'Out', no_grad_set={'x1', 'x2'})
 def test_check_grad_ignore_x3(self):
 self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3'))
...
@@ -66,18 +66,16 @@ class TestOperator(unittest.TestCase):
 self.assertEqual(mul_op.output("Out"), ["mul.out"])
 self.assertEqual(
 set(mul_op.attr_names),
-set(
-[
+{
 "x_num_col_dims",
 "y_num_col_dims",
 "op_role",
 "op_role_var",
 "op_namescope",
 "op_callstack",
 "op_device",
 "with_quant_attr",
-]
-),
+},
 )
 self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
 self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
...
@@ -358,14 +358,14 @@ class TestPool2D_Op_Mixin:
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
 place,
-set(['X']),
+{'X'},
 'Out',
 max_relative_error=0.07,
 check_dygraph=(not self.use_mkldnn),
 )
 elif self.pool_type != "max":
 self.check_grad(
-set(['X']),
+{'X'},
 'Out',
 max_relative_error=0.07,
 check_dygraph=(not self.use_mkldnn),
@@ -524,7 +524,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True):
 ):
 self.check_grad_with_place(
 place,
-set(['X']),
+{'X'},
 'Out',
 max_relative_error=0.07,
 check_dygraph=(not self.use_mkldnn),
@@ -565,7 +565,7 @@ def create_test_fp16_class(parent, check_grad=True):
 ):
 self.check_grad_with_place(
 place,
-set(['X']),
+{'X'},
 'Out',
 max_relative_error=0.07,
 check_dygraph=(not self.use_mkldnn),
@@ -864,10 +864,10 @@ class TestCase5_Max(TestCase2):
 if self.has_cudnn() and self.pool_type == "max":
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
-place, set(['X']), 'Out', max_relative_error=1.00
+place, {'X'}, 'Out', max_relative_error=1.00
 )
 elif self.pool_type == "max":
-self.check_grad(set(['X']), 'Out', max_relative_error=1.00)
+self.check_grad({'X'}, 'Out', max_relative_error=1.00)
 class TestCase5_channel_last_Max(TestCase5_Max):
...
@@ -339,15 +339,15 @@ class TestPool3D_Op(OpTest):
 place = core.CUDAPlace(0)
 if core.is_compiled_with_rocm():
 self.check_grad_with_place(
-place, set(['X']), 'Out', max_relative_error=1e-2
+place, {'X'}, 'Out', max_relative_error=1e-2
 )
 else:
-self.check_grad_with_place(place, set(['X']), 'Out')
+self.check_grad_with_place(place, {'X'}, 'Out')
 elif self.pool_type != "max":
 if core.is_compiled_with_rocm():
-self.check_grad(set(['X']), 'Out', max_relative_error=1e-2)
+self.check_grad({'X'}, 'Out', max_relative_error=1e-2)
 else:
-self.check_grad(set(['X']), 'Out')
+self.check_grad({'X'}, 'Out')
 def init_data_format(self):
 self.data_format = "NCDHW"
@@ -783,10 +783,10 @@ class TestCase5_Max(TestCase2):
 if self.has_cudnn() and self.pool_type == "max":
 place = core.CUDAPlace(0)
 self.check_grad_with_place(
-place, set(['X']), 'Out', max_relative_error=1.00
+place, {'X'}, 'Out', max_relative_error=1.00
 )
 elif self.pool_type == "max":
-self.check_grad(set(['X']), 'Out', max_relative_error=1.00)
+self.check_grad({'X'}, 'Out', max_relative_error=1.00)
 class TestCase5_channel_last_Max(TestCase5_Max):
...
@@ -163,7 +163,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
 self.check_output()
 def test_check_grad(self):
-self.check_grad(set(['X']), ['Out'])
+self.check_grad({'X'}, ['Out'])
 def init_test_case(self):
 self.op_type = "max_pool3d_with_index"
...
@@ -91,16 +91,14 @@ class TestConditionalOp(unittest.TestCase):
 model_file = os.path.join(root_path.name, "while_net")
 paddle.jit.save(net, model_file)
-right_pdmodel = set(
-[
+right_pdmodel = {
 "uniform_random",
 "shape",
 "slice",
 "not_equal",
 "while",
 "elementwise_add",
-]
-)
+}
 paddle.enable_static()
 pdmodel = getModelOp(model_file + ".pdmodel")
 self.assertTrue(
@@ -119,16 +117,14 @@ class TestConditionalOp(unittest.TestCase):
 model_file = os.path.join(root_path.name, "for_net")
 paddle.jit.save(net, model_file)
-right_pdmodel = set(
-[
+right_pdmodel = {
 "randint",
 "fill_constant",
 "cast",
 "less_than",
 "while",
 "elementwise_add",
-]
-)
+}
 paddle.enable_static()
 pdmodel = getModelOp(model_file + ".pdmodel")
 self.assertTrue(
@@ -147,16 +143,14 @@ class TestConditionalOp(unittest.TestCase):
 model_file = os.path.join(root_path.name, "if_net")
 paddle.jit.save(net, model_file)
-right_pdmodel = set(
-[
+right_pdmodel = {
 "assign_value",
 "greater_than",
 "cast",
 "conditional_block",
 "logical_not",
 "select_input",
-]
-)
+}
 paddle.enable_static()
 pdmodel = getModelOp(model_file + ".pdmodel")
 self.assertTrue(
...
@@ -115,7 +115,7 @@ class TestSpectralNormOp(TestSpectralNormOpNoGrad):
 self.check_grad(
 ['Weight'],
 'Out',
-no_grad_set=set(["U", "V"]),
+no_grad_set={"U", "V"},
 )
 def initTestCase(self):
...
@@ -99,7 +99,7 @@ class TestStrideSliceOp(OpTest):
 self.check_output(check_eager=True)
 def test_check_grad(self):
-self.check_grad(set(['Input']), 'Out', check_eager=True)
+self.check_grad({'Input'}, 'Out', check_eager=True)
 def initTestCase(self):
 self.input = np.random.rand(100)
...
@@ -57,7 +57,7 @@ class TestTopkOp(OpTest):
 self.check_output()
 def test_check_grad(self):
-self.check_grad(set(['X']), 'Out')
+self.check_grad({'X'}, 'Out')
 if __name__ == "__main__":
...
@@ -79,7 +79,7 @@ class TestAffineChannelOp(XPUOpTest):
 paddle.enable_static()
 place = paddle.XPUPlace(0)
 self.check_grad_with_place(
-place, ['X'], 'Out', no_grad_set=set(['Scale', 'Bias'])
+place, ['X'], 'Out', no_grad_set={'Scale', 'Bias'}
 )
 def init_test_case(self):
...
@@ -276,7 +276,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
 if core.is_compiled_with_xpu():
 paddle.enable_static()
 self.check_grad_with_place(
-self.place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+self.place, ['Input'], 'Output', no_grad_set={'Filter'}
 )
 def test_check_grad_no_input(self):
@@ -285,7 +285,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
 if core.is_compiled_with_xpu():
 paddle.enable_static()
 self.check_grad_with_place(
-self.place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+self.place, ['Filter'], 'Output', no_grad_set={'Input'}
 )
 def init_test_case(self):
@@ -440,7 +440,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
 if core.is_compiled_with_xpu():
 paddle.enable_static()
 self.check_grad_with_place(
-self.place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+self.place, ['Input'], 'Output', no_grad_set={'Filter'}
 )
 def test_check_grad_no_input(self):
@@ -450,7 +450,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
 if core.is_compiled_with_xpu():
 paddle.enable_static()
 self.check_grad_with_place(
-self.place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+self.place, ['Filter'], 'Output', no_grad_set={'Input'}
 )
 def init_test_case(self):
...
@@ -192,19 +192,19 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper):
 def test_check_grad_no_input(self):
 if self.need_check_grad:
 self.check_grad_with_place(
-self.place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+self.place, ['Filter'], 'Output', no_grad_set={'Input'}
 )
 def test_check_grad_no_filter(self):
 if self.need_check_grad:
 self.check_grad_with_place(
-self.place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+self.place, ['Input'], 'Output', no_grad_set={'Filter'}
 )
 def test_check_grad(self):
 if self.need_check_grad:
 self.check_grad_with_place(
-self.place, set(['Input', 'Filter']), 'Output'
+self.place, {'Input', 'Filter'}, 'Output'
 )
 def init_test_case(self):
...
@@ -275,7 +275,7 @@ class XPUTestConv3DOp(XPUOpTestWrapper):
 ['Input'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 )
 def test_check_grad_no_input(self):
@@ -286,7 +286,7 @@ class XPUTestConv3DOp(XPUOpTestWrapper):
 ['Filter'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 )
 def init_test_case(self):
@@ -445,7 +445,7 @@ class XPUTestConv3DOp_v2(XPUOpTestWrapper):
 ['Input'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Filter']),
+no_grad_set={'Filter'},
 )
 def test_check_grad_no_input(self):
@@ -455,7 +455,7 @@ class XPUTestConv3DOp_v2(XPUOpTestWrapper):
 ['Filter'],
 'Output',
 max_relative_error=0.03,
-no_grad_set=set(['Input']),
+no_grad_set={'Input'},
 )
 def init_test_case(self):
...
@@ -145,27 +145,27 @@ class XPUTestInstanceNormOp(XPUOpTestWrapper):
 class TestXPUInstanceNormOp6(XPUTestInstanceNormOp):
 def set_attrs(self):
 self.shape = [10, 12, 32, 32]
-self.no_grad_set = set(['Scale', 'Bias'])
+self.no_grad_set = {'Scale', 'Bias'}
 class TestXPUInstanceNormOp7(XPUTestInstanceNormOp):
 def set_attrs(self):
 self.shape = [4, 5, 6, 7]
-self.no_grad_set = set(['Scale', 'Bias'])
+self.no_grad_set = {'Scale', 'Bias'}
 class TestXPUInstanceNormOp8(XPUTestInstanceNormOp):
 def set_attrs(self):
 self.shape = [1, 8, 16, 16]
-self.no_grad_set = set(['Scale', 'Bias'])
+self.no_grad_set = {'Scale', 'Bias'}
 class TestXPUInstanceNormOp9(XPUTestInstanceNormOp):
 def set_attrs(self):
 self.shape = [4, 16, 256, 128]
-self.no_grad_set = set(['Scale', 'Bias'])
+self.no_grad_set = {'Scale', 'Bias'}
 class TestXPUInstanceNormOp10(XPUTestInstanceNormOp):
 def set_attrs(self):
 self.shape = [10, 3, 512, 1]
-self.no_grad_set = set(['Scale', 'Bias'])
+self.no_grad_set = {'Scale', 'Bias'}
 class TestInstanceNormOpError(XPUOpTest):
 def setUp(self):
...
@@ -79,8 +79,8 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
 paddle.XPUPlace(0),
 ['X'],
 'Loss',
-no_grad_set=set(["Target"]),
-check_dygraph=True,
+no_grad_set={"Target"},
+check_eager=True,
 )
 def initTestCase(self):
...
@@ -346,7 +346,7 @@ class XPUTestPool2D_Op(XPUOpTestWrapper):
self.check_output_with_place(self.place) self.check_output_with_place(self.place)
def test_check_grad(self): def test_check_grad(self):
self.check_grad_with_place(self.place, set(['X']), 'Out') self.check_grad_with_place(self.place, {'X'}, 'Out')
def init_data_format(self): def init_data_format(self):
self.data_format = "NCHW" self.data_format = "NCHW"
......
...@@ -344,7 +344,7 @@ class XPUTestPool3DOp(XPUOpTestWrapper): ...@@ -344,7 +344,7 @@ class XPUTestPool3DOp(XPUOpTestWrapper):
return return
place = paddle.XPUPlace(0) place = paddle.XPUPlace(0)
self.check_grad_with_place(place, set(['X']), 'Out') self.check_grad_with_place(place, {'X'}, 'Out')
def init_data_format(self): def init_data_format(self):
self.data_format = "NCDHW" self.data_format = "NCDHW"
...@@ -532,7 +532,7 @@ class XPUTestPool3DOp(XPUOpTestWrapper): ...@@ -532,7 +532,7 @@ class XPUTestPool3DOp(XPUOpTestWrapper):
if self.dtype == np.float16: if self.dtype == np.float16:
return return
place = paddle.XPUPlace(0) place = paddle.XPUPlace(0)
self.check_grad_with_place(place, set(['X']), 'Out') self.check_grad_with_place(place, {'X'}, 'Out')
support_types = get_xpu_op_support_types('pool3d') support_types = get_xpu_op_support_types('pool3d')
......
...@@ -112,7 +112,7 @@ class XPUTestPoolWithIndex_op(XPUOpTestWrapper): ...@@ -112,7 +112,7 @@ class XPUTestPoolWithIndex_op(XPUOpTestWrapper):
self.check_output_with_place(self.place) self.check_output_with_place(self.place)
def test_check_grad(self): def test_check_grad(self):
self.check_grad_with_place(self.place, set(['X']), ['Out']) self.check_grad_with_place(self.place, {'X'}, ['Out'])
def init_test_case(self): def init_test_case(self):
self.pool_forward_naive = max_pool2D_forward_naive self.pool_forward_naive = max_pool2D_forward_naive
......
...@@ -178,7 +178,7 @@ class XPUTestSequenceConv(XPUOpTestWrapper): ...@@ -178,7 +178,7 @@ class XPUTestSequenceConv(XPUOpTestWrapper):
def test_check_grad_padding_data(self): def test_check_grad_padding_data(self):
if self.padding_trainable: if self.padding_trainable:
self.check_grad( self.check_grad(
['PaddingData'], 'Out', no_grad_set=set(['X', 'Filter']) ['PaddingData'], 'Out', no_grad_set={'X', 'Filter'}
) )
def test_check_grad_Filter(self): def test_check_grad_Filter(self):
...@@ -189,20 +189,18 @@ class XPUTestSequenceConv(XPUOpTestWrapper): ...@@ -189,20 +189,18 @@ class XPUTestSequenceConv(XPUOpTestWrapper):
def test_check_grad_input_filter(self): def test_check_grad_input_filter(self):
if self.padding_trainable: if self.padding_trainable:
self.check_grad( self.check_grad(
['X', 'Filter'], 'Out', no_grad_set=set(['PaddingData']) ['X', 'Filter'], 'Out', no_grad_set={'PaddingData'}
) )
def test_check_grad_padding_input(self): def test_check_grad_padding_input(self):
if self.padding_trainable: if self.padding_trainable:
self.check_grad( self.check_grad(
self.inputs_val_no_f, 'Out', no_grad_set=set(['Filter']) self.inputs_val_no_f, 'Out', no_grad_set={'Filter'}
) )
def test_check_grad_padding_filter(self): def test_check_grad_padding_filter(self):
if self.padding_trainable: if self.padding_trainable:
self.check_grad( self.check_grad(self.inputs_val_no_x, 'Out', no_grad_set={'X'})
self.inputs_val_no_x, 'Out', no_grad_set=set(['X'])
)
def init_test_case(self): def init_test_case(self):
self.input_row = 7 self.input_row = 7
......
...@@ -197,11 +197,9 @@ class AttributeJstTransformer(BaseTransformer): ...@@ -197,11 +197,9 @@ class AttributeJstTransformer(BaseTransformer):
assert isinstance( assert isinstance(
node, gast.AST node, gast.AST
), "Input non-gast.AST node for the initialization of ToTensorTransformer." ), "Input non-gast.AST node for the initialization of ToTensorTransformer."
self.interested_name = set( self.interested_name = {
[ 'size',
'size', }
]
)
self.root = node self.root = node
def transform(self): def transform(self):
......
...@@ -288,7 +288,7 @@ class NameVisitor(gast.NodeVisitor): ...@@ -288,7 +288,7 @@ class NameVisitor(gast.NodeVisitor):
return new_name_ids return new_name_ids
def _is_call_func_name_node(self, node): def _is_call_func_name_node(self, node):
white_func_names = set(['append', 'extend']) white_func_names = {'append', 'extend'}
if len(self.ancestor_nodes) > 1: if len(self.ancestor_nodes) > 1:
assert self.ancestor_nodes[-1] == node assert self.ancestor_nodes[-1] == node
parent_node = self.ancestor_nodes[-2] parent_node = self.ancestor_nodes[-2]
......
...@@ -62,7 +62,7 @@ class Quant2Int8MkldnnPass: ...@@ -62,7 +62,7 @@ class Quant2Int8MkldnnPass:
] ]
self._ops_to_quantize = _ops_to_quantize self._ops_to_quantize = _ops_to_quantize
self._op_ids_to_skip = ( self._op_ids_to_skip = (
_op_ids_to_skip if _op_ids_to_skip is not None else set([-1]) _op_ids_to_skip if _op_ids_to_skip is not None else {-1}
) )
self._scale_immutable_ops = [ self._scale_immutable_ops = [
'transpose2', 'transpose2',
......
...@@ -387,7 +387,7 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase): ...@@ -387,7 +387,7 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase):
test_case_args.ops_to_quantize test_case_args.ops_to_quantize
) )
self._op_ids_to_skip = set([-1]) self._op_ids_to_skip = {-1}
if test_case_args.op_ids_to_skip: if test_case_args.op_ids_to_skip:
self._op_ids_to_skip = self._ints_from_csv( self._op_ids_to_skip = self._ints_from_csv(
test_case_args.op_ids_to_skip test_case_args.op_ids_to_skip
......
...@@ -325,7 +325,7 @@ class QuantInt8NLPComparisonTest(unittest.TestCase): ...@@ -325,7 +325,7 @@ class QuantInt8NLPComparisonTest(unittest.TestCase):
test_case_args.ops_to_quantize test_case_args.ops_to_quantize
) )
self._op_ids_to_skip = set([-1]) self._op_ids_to_skip = {-1}
if test_case_args.op_ids_to_skip: if test_case_args.op_ids_to_skip:
self._op_ids_to_skip = self._ints_from_csv( self._op_ids_to_skip = self._ints_from_csv(
test_case_args.op_ids_to_skip test_case_args.op_ids_to_skip
......
...@@ -887,7 +887,7 @@ def add_compile_flag(extra_compile_args, flags): ...@@ -887,7 +887,7 @@ def add_compile_flag(extra_compile_args, flags):
def is_cuda_file(path): def is_cuda_file(path):
cuda_suffix = set(['.cu']) cuda_suffix = {'.cu'}
items = os.path.splitext(path) items = os.path.splitext(path)
assert len(items) > 1 assert len(items) > 1
return items[-1] in cuda_suffix return items[-1] in cuda_suffix
......
...@@ -25,8 +25,8 @@ import sys ...@@ -25,8 +25,8 @@ import sys
from paddle import fluid from paddle import fluid
INTS = set(['int', 'int64_t']) INTS = {'int', 'int64_t'}
FLOATS = set(['float', 'double']) FLOATS = {'float', 'double'}
def get_all_kernels(): def get_all_kernels():
......
...@@ -192,7 +192,7 @@ def insert_api_into_dict(full_name, gen_doc_anno=None): ...@@ -192,7 +192,7 @@ def insert_api_into_dict(full_name, gen_doc_anno=None):
api_info_dict[fc_id]["all_names"].add(full_name) api_info_dict[fc_id]["all_names"].add(full_name)
else: else:
api_info_dict[fc_id] = { api_info_dict[fc_id] = {
"all_names": set([full_name]), "all_names": {full_name},
"id": fc_id, "id": fc_id,
"object": obj, "object": obj,
"type": type(obj).__name__, "type": type(obj).__name__,
......
...@@ -251,7 +251,7 @@ def is_required_match(requirestr, cbtitle='not-specified'): ...@@ -251,7 +251,7 @@ def is_required_match(requirestr, cbtitle='not-specified'):
None - skipped # trick None - skipped # trick
""" """
global SAMPLE_CODE_TEST_CAPACITY, RUN_ON_DEVICE # readonly global SAMPLE_CODE_TEST_CAPACITY, RUN_ON_DEVICE # readonly
requires = set(['cpu']) requires = {'cpu'}
if requirestr: if requirestr:
for r in requirestr.split(','): for r in requirestr.split(','):
rr = r.strip().lower() rr = r.strip().lower()
......
...@@ -77,7 +77,7 @@ class Test_is_primitive(unittest.TestCase): ...@@ -77,7 +77,7 @@ class Test_is_primitive(unittest.TestCase):
self.assertTrue(is_primitive(set())) self.assertTrue(is_primitive(set()))
self.assertTrue(is_primitive([1, 2])) self.assertTrue(is_primitive([1, 2]))
self.assertTrue(is_primitive((1.1, 2.2))) self.assertTrue(is_primitive((1.1, 2.2)))
self.assertTrue(is_primitive(set([1, 2.3]))) self.assertTrue(is_primitive({1, 2.3}))
self.assertFalse(is_primitive(range(3))) self.assertFalse(is_primitive(range(3)))
self.assertFalse(is_primitive({})) self.assertFalse(is_primitive({}))
self.assertFalse(is_primitive([1, 1j])) self.assertFalse(is_primitive([1, 1j]))
......
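These hunks apply the same C405 rewrite: a set built by passing a list (or tuple) literal to set() becomes a set literal. A minimal stand-alone sketch of the pattern, with illustrative variable names that are not taken from the patch:

# Before: a throwaway list literal is materialized and then copied into a set (flake8 C405).
no_grad_set = set(['Scale', 'Bias'])

# After: the set literal avoids the intermediate list and states the intent directly.
no_grad_set = {'Scale', 'Bias'}

# The one exception is the empty set: {} denotes an empty dict, so set() is kept.
empty = set()

assert no_grad_set == {'Scale', 'Bias'} and not empty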