PaddlePaddle / Paddle
Unverified commit 9fa98349
Authored Mar 28, 2023 by Infinity_lee; committed by GitHub on Mar 28, 2023
[CodeStyle][C405] Unnecessary <list/tuple> literal - rewrite as a set literal (#51972)
Parent: 7aa7fc49
Showing 68 changed files with 347 additions and 392 deletions (+347, -392)
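For context: C405 is the flake8-comprehensions rule (also implemented by ruff) that flags a list or tuple literal passed to set(). The call builds a throwaway sequence only to hand it to the constructor, while a set literal expresses the same value directly. A minimal sketch of the rewrite this commit applies throughout (the variable name is illustrative, not quoted from the diff):

    # Before: C405 - the list literal is built only to be consumed by set()
    no_grad_set = set(['Scale', 'Bias'])
    # After: a set literal with the same elements, no intermediate list
    no_grad_set = {'Scale', 'Bias'}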
paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py  +49 -51
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py  +1 -1
paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py  +1 -1
pyproject.toml  +3 -0
python/paddle/distributed/auto_parallel/converter.py  +1 -1
python/paddle/distributed/auto_parallel/tuner/algorithms.py  +1 -1
python/paddle/distributed/auto_parallel/utils.py  +1 -1
python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py  +1 -1
python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py  +1 -1
python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py  +3 -3
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py  +1 -1
python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py  +2 -2
python/paddle/distributed/passes/auto_parallel_sharding.py  +2 -2
python/paddle/fluid/tests/unittests/auto_parallel/test_dist_pnorm.py  +6 -6
python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_sharding_model.py  +24 -28
python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py  +75 -89
python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py  +9 -9
python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py  +1 -1
python/paddle/fluid/tests/unittests/ir/test_convert_to_mixed_precision.py  +1 -1
python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py  +2 -2
python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py  +1 -1
python/paddle/fluid/tests/unittests/prim_op_test.py  +1 -1
python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py  +4 -4
python/paddle/fluid/tests/unittests/test_affine_channel_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_backward.py  +7 -9
python/paddle/fluid/tests/unittests/test_batch_norm_op.py  +3 -3
python/paddle/fluid/tests/unittests/test_conv2d_op.py  +10 -10
python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py  +6 -10
python/paddle/fluid/tests/unittests/test_conv3d_op.py  +4 -4
python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py  +6 -6
python/paddle/fluid/tests/unittests/test_data_norm_op.py  +5 -5
python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py  +4 -12
python/paddle/fluid/tests/unittests/test_group_norm_op.py  +5 -5
python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py  +5 -7
python/paddle/fluid/tests/unittests/test_infer_no_need_buffer_slots.py  +3 -3
python/paddle/fluid/tests/unittests/test_instance_norm_op.py  +2 -2
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_matmul_v2_op.py  +2 -2
python/paddle/fluid/tests/unittests/test_multiplex_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_operator_desc.py  +10 -12
python/paddle/fluid/tests/unittests/test_pool2d_op.py  +6 -6
python/paddle/fluid/tests/unittests/test_pool3d_op.py  +6 -6
python/paddle/fluid/tests/unittests/test_pool_max_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py  +24 -30
python/paddle/fluid/tests/unittests/test_spectral_norm_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_strided_slice_op.py  +1 -1
python/paddle/fluid/tests/unittests/test_top_k_op.py  +1 -1
python/paddle/fluid/tests/unittests/xpu/test_affine_channel_op_xpu.py  +1 -1
python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py  +4 -4
python/paddle/fluid/tests/unittests/xpu/test_conv2d_transpose_op_xpu.py  +3 -3
python/paddle/fluid/tests/unittests/xpu/test_conv3d_op_xpu.py  +4 -4
python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py  +5 -5
python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py  +2 -2
python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py  +1 -1
python/paddle/fluid/tests/unittests/xpu/test_pool3d_op_xpu.py  +2 -2
python/paddle/fluid/tests/unittests/xpu/test_pool_max_op_xpu.py  +1 -1
python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py  +4 -6
python/paddle/jit/dy2static/basic_api_transformer.py  +3 -5
python/paddle/jit/dy2static/ifelse_transformer.py  +1 -1
python/paddle/static/quantization/quant2_int8_mkldnn_pass.py  +1 -1
python/paddle/static/quantization/tests/quant2_int8_image_classification_comparison.py  +1 -1
python/paddle/static/quantization/tests/quant2_int8_nlp_comparison.py  +1 -1
python/paddle/utils/cpp_extension/extension_utils.py  +1 -1
tools/check_op_register_type.py  +2 -2
tools/print_signatures.py  +1 -1
tools/sampcd_processor.py  +1 -1
tools/test_print_signatures.py  +1 -1
paddle/fluid/eager/auto_code_generator/generator/codegen_utils.py
@@ -19,57 +19,55 @@ import yaml
 ####################
 # Global Variables #
 ####################
-ops_to_fill_zero_for_empty_grads = set(
-    [
-        "split_grad",
-        "split_with_num_grad",
-        "rnn_grad",
-        "matmul_double_grad",
-        "matmul_triple_grad",
-        "sigmoid_double_grad",
-        "sigmoid_triple_grad",
-        "add_double_grad",
-        "add_triple_grad",
-        "multiply_grad",
-        "multiply_double_grad",
-        "multiply_triple_grad",
-        "conv2d_grad_grad",
-        "conv2d_transpose_double_grad",
-        "batch_norm_double_grad",
-        "tanh_grad",
-        "tanh_double_grad",
-        "tanh_triple_grad",
-        "sin_double_grad",
-        "sin_triple_grad",
-        "cos_double_grad",
-        "cos_triple_grad",
-        "subtract_double_grad",
-        "divide_double_grad",
-        "log_double_grad",
-        "elu_double_grad",
-        "leaky_relu_double_grad",
-        "sqrt_double_grad",
-        "rsqrt_double_grad",
-        "square_double_grad",
-        "celu_double_grad",
-        "pad_double_grad",
-        "pad3d_double_grad",
-        "squeeze_double_grad",
-        "unsqueeze_double_grad",
-        "instance_norm_double_grad",
-        "conv3d_double_grad",
-        "depthwise_conv2d_grad_grad",
-        "concat_double_grad",
-        "expand_grad",
-        "argsort_grad",
-        "eigh_grad",
-        "add_grad",
-        "subtract_grad",
-        "multiply_grad",
-        "divide_grad",
-        "matmul_grad",
-    ]
-)
+ops_to_fill_zero_for_empty_grads = {
+    "split_grad",
+    "split_with_num_grad",
+    "rnn_grad",
+    "matmul_double_grad",
+    "matmul_triple_grad",
+    "sigmoid_double_grad",
+    "sigmoid_triple_grad",
+    "add_double_grad",
+    "add_triple_grad",
+    "multiply_grad",
+    "multiply_double_grad",
+    "multiply_triple_grad",
+    "conv2d_grad_grad",
+    "conv2d_transpose_double_grad",
+    "batch_norm_double_grad",
+    "tanh_grad",
+    "tanh_double_grad",
+    "tanh_triple_grad",
+    "sin_double_grad",
+    "sin_triple_grad",
+    "cos_double_grad",
+    "cos_triple_grad",
+    "subtract_double_grad",
+    "divide_double_grad",
+    "log_double_grad",
+    "elu_double_grad",
+    "leaky_relu_double_grad",
+    "sqrt_double_grad",
+    "rsqrt_double_grad",
+    "square_double_grad",
+    "celu_double_grad",
+    "pad_double_grad",
+    "pad3d_double_grad",
+    "squeeze_double_grad",
+    "unsqueeze_double_grad",
+    "instance_norm_double_grad",
+    "conv3d_double_grad",
+    "depthwise_conv2d_grad_grad",
+    "concat_double_grad",
+    "expand_grad",
+    "argsort_grad",
+    "eigh_grad",
+    "add_grad",
+    "subtract_grad",
+    "multiply_grad",
+    "divide_grad",
+    "matmul_grad",
+}
 # For API dispatch used at python-level
 # { op_name : [arg_name, ...] }
paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -48,7 +48,7 @@ from codegen_utils import (
 # But because there is no check in old dygraph mode, in order to
 # keeping the code compatible, here we also skip inplace check in new dygraph temporarily,
 # and this will be fixed in the futrue.
-inplace_check_blacklist = set(["assign_out_"])
+inplace_check_blacklist = {"assign_out_"}
 # Black Ops list that's NO NEED to apply code generation
 black_ops_list = [
paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
@@ -26,7 +26,7 @@ from codegen_utils import (
 #########################
 # Global Configurations #
 #########################
-skipped_forward_api_names = set([])
+skipped_forward_api_names = set()


 def SkipAPIGeneration(forward_api_name):
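Note the edge case above: an empty set has no literal spelling, because bare braces denote an empty dict, so set([]) becomes set() rather than {}. A quick sketch:

    assert type({}) is dict    # empty braces make a dict, not a set
    assert type(set()) is set  # set() is the only spelling of an empty set
    assert set([]) == set()    # equal values, but set([]) builds a useless list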
pyproject.toml
@@ -36,6 +36,9 @@ select = [
     "C400",
     "C401",
     "C402",
+    "C403",
+    "C404",
+    "C405",
     "C408",
     "C409",
     "C410",
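The select list above is ruff's opt-in rule list in pyproject.toml; once C405 is listed there, CI flags any reintroduced set([...]) literal. Assuming a standard ruff setup, the mechanical part of such a change can typically be applied automatically with an invocation along the lines of ruff check --select C405 --fix . (illustrative command, not quoted from the PR).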
python/paddle/distributed/auto_parallel/converter.py
@@ -482,7 +482,7 @@ class Converter:
                 split_indices_list = partition_index
         split_indices_list = list(
             map(
-                lambda x, y: list(set(x) - set([y]) - set([0])),
+                lambda x, y: list(set(x) - {y} - {0}),
                 split_indices_list,
                 complete_shape,
             )
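The same rewrite applies inside expressions, as in the lambda above: the singleton operands of the set differences become literals. A tiny sketch of the semantics, with hypothetical values:

    x, y = [0, 4, 8], 8
    assert list(set(x) - {y} - {0}) == [4]  # drop y and 0, keep the rest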
python/paddle/distributed/auto_parallel/tuner/algorithms.py
@@ -119,7 +119,7 @@ class ShardingStageAlgorithm(AlgorithmBase):
         stage_range = self._config.sharding.get("tuning_range", None)
         if stage_range:
             assert set(stage_range).issubset(
-                set([0, 1, 2, 3])
+                {0, 1, 2, 3}
             ), "Sharding Stage should belong into range within 0 - 3 but got {}.".format(
                 stage_range
             )
python/paddle/distributed/auto_parallel/utils.py
@@ -1172,7 +1172,7 @@ def _get_split_indices(
                 split_indices_list = partition_index
         split_indices_list = list(
             map(
-                lambda x, y: list(set(x) - set([y]) - set([0])),
+                lambda x, y: list(set(x) - {y} - {0}),
                 split_indices_list,
                 complete_shape,
             )
python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py
@@ -303,7 +303,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
             vars_metatools,
         )

-        processed_var_names = set(["@EMPTY@"])
+        processed_var_names = {"@EMPTY@"}
         param_memory_size = 0
         for varname in program.global_block().vars:
             var = program.global_block().vars[varname]
python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py
@@ -208,7 +208,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
                 return False

        free = get_sys_free_mem()
-        processed_var_names = set(["@EMPTY@"])
+        processed_var_names = {"@EMPTY@"}
         param_memory_size = 0
         for varname in program.global_block().vars:
             var = program.global_block().vars[varname]
python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py
@@ -27,7 +27,7 @@ class Shard:
     def __init__(
         self,
     ):
-        self.global_params = set([])
+        self.global_params = set()
         self.worker_idx = -1
         self.worker_num = -1
         self.global_param2device = {}
@@ -96,8 +96,8 @@ class Shard:
         return -1

     def find_broadcast_params(self, block):
-        broadcast_vars = set([])
-        fp16_params = set([])
+        broadcast_vars = set()
+        fp16_params = set()
         fp16_to_fp32 = {}
         param_usage = {x: 0 for x in self.global_params}
python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py
@@ -981,7 +981,7 @@ def add_sync_comm(program, sharding_ring_id):
     assert sharding_ring_id >= 0, "sharding_ring_id should larger than zero"
     block = program.global_block()
-    not_sync_vars = set([])
+    not_sync_vars = set()
     for op in block.ops:
         if op.type in ["c_broadcast", "c_allreduce"]:
             for input_name in op.desc.input_arg_names():
python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py
@@ -78,8 +78,8 @@ class ShardingOptimizer(MetaOptimizerBase):
         self._startup_program = None
         self._segments = []
         # params and fp16 params is for broadcast
-        self._params = set([])
-        self._broadcast_vars = set([])
+        self._params = set()
+        self._broadcast_vars = set()
         # reduced grads to param name
         self._reduced_grads_to_param = {}
         self._shard = Shard()
python/paddle/distributed/passes/auto_parallel_sharding.py
@@ -1831,8 +1831,8 @@ class ShardingInfo:
     # and sharding should only broadcast the casted fp16 param
     # instead of the origin fp32 version param.
     def get_broadcast_vars_and_param_usage(self, block):
-        broadcast_vars = set([])
-        fp16_params = set([])
+        broadcast_vars = set()
+        fp16_params = set()
         fp16_to_fp32 = {}
         param_usage = {x: 0 for x in self.param_names}
python/paddle/fluid/tests/unittests/auto_parallel/test_dist_pnorm.py
@@ -123,21 +123,21 @@ class TestDistPNormDP(TestDistPNorm):
             assert op_dist_attr.impl_type == "p_norm"
             if op.type in ["p_norm", "p_norm_grad"]:
                 for input_attr in op_dist_attr.inputs_dist_attrs.values():
-                    assert set(input_attr.dims_mapping) == set([-1])
+                    assert set(input_attr.dims_mapping) == {-1}
                 for output_attr in op_dist_attr.outputs_dist_attrs.values():
-                    assert set(output_attr.dims_mapping) == set([-1])
+                    assert set(output_attr.dims_mapping) == {-1}
             if op.type == 'c_allgather':
                 for input_attr in op_dist_attr.inputs_dist_attrs.values():
                     assert input_attr.dims_mapping[0] == 0
-                    assert set(input_attr.dims_mapping[1:]) == set([-1])
+                    assert set(input_attr.dims_mapping[1:]) == {-1}
                 for output_attr in op_dist_attr.outputs_dist_attrs.values():
-                    assert set(output_attr.dims_mapping) == set([-1])
+                    assert set(output_attr.dims_mapping) == {-1}
             if op.type == 'slice':
                 for input_attr in op_dist_attr.inputs_dist_attrs.values():
-                    assert set(input_attr.dims_mapping) == set([-1])
+                    assert set(input_attr.dims_mapping) == {-1}
                 for output_attr in op_dist_attr.outputs_dist_attrs.values():
                     assert output_attr.dims_mapping[0] == 0
-                    assert set(output_attr.dims_mapping[1:]) == set([-1])
+                    assert set(output_attr.dims_mapping[1:]) == {-1}
         assert op_types == [
             "c_allgather",
             "p_norm",
python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_sharding_model.py
@@ -322,39 +322,35 @@ class TestDistMPTraning(unittest.TestCase):
         )

     def test_sharding_adam(self):
-        sharded_accumulators = set(
-            [
-                'linear_0.w_0_moment1_0',
-                'linear_1.b_0_moment1_0',
-                'linear_2.b_0_moment1_0',
-                'embedding_0.w_0_moment1_0',
-                'linear_0.w_0_moment2_0',
-                'linear_1.b_0_moment2_0',
-                'linear_2.b_0_moment2_0',
-                'embedding_0.w_0_moment2_0',
-                'linear_0.w_0_beta1_pow_acc_0',
-                'linear_1.b_0_beta1_pow_acc_0',
-                'linear_2.b_0_beta1_pow_acc_0',
-                'embedding_0.w_0_beta1_pow_acc_0',
-                'linear_0.w_0_beta2_pow_acc_0',
-                'linear_1.b_0_beta2_pow_acc_0',
-                'linear_2.b_0_beta2_pow_acc_0',
-                'embedding_0.w_0_beta2_pow_acc_0',
-            ]
-        )
+        sharded_accumulators = {
+            'linear_0.w_0_moment1_0',
+            'linear_1.b_0_moment1_0',
+            'linear_2.b_0_moment1_0',
+            'embedding_0.w_0_moment1_0',
+            'linear_0.w_0_moment2_0',
+            'linear_1.b_0_moment2_0',
+            'linear_2.b_0_moment2_0',
+            'embedding_0.w_0_moment2_0',
+            'linear_0.w_0_beta1_pow_acc_0',
+            'linear_1.b_0_beta1_pow_acc_0',
+            'linear_2.b_0_beta1_pow_acc_0',
+            'embedding_0.w_0_beta1_pow_acc_0',
+            'linear_0.w_0_beta2_pow_acc_0',
+            'linear_1.b_0_beta2_pow_acc_0',
+            'linear_2.b_0_beta2_pow_acc_0',
+            'embedding_0.w_0_beta2_pow_acc_0',
+        }
         self.sharding_model(
             Optimizer="adam", sharded_accumulators=sharded_accumulators
         )

     def test_sharding_momentum(self):
-        sharded_accumulators = set(
-            [
-                'linear_6.w_0_velocity_0',
-                'linear_7.b_0_velocity_0',
-                'linear_8.b_0_velocity_0',
-                'embedding_2.w_0_velocity_0',
-            ]
-        )
+        sharded_accumulators = {
+            'linear_6.w_0_velocity_0',
+            'linear_7.b_0_velocity_0',
+            'linear_8.b_0_velocity_0',
+            'embedding_2.w_0_velocity_0',
+        }
         self.sharding_model(
             Optimizer="Momentum", sharded_accumulators=sharded_accumulators
         )
python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py
@@ -42,17 +42,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertIn('@BroadCast', ''.join(vars))
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    "fc_1.b_0",
-                    "fc_2.b_0",
-                    "fc_2.w_0",
-                    "fc_1.b_0_velocity_0",
-                    "fc_2.b_0_velocity_0",
-                    "fc_2.w_0_velocity_0",
-                    "learning_rate_0",
-                ]
-            ),
+            {
+                "fc_1.b_0",
+                "fc_2.b_0",
+                "fc_2.w_0",
+                "fc_1.b_0_velocity_0",
+                "fc_2.b_0_velocity_0",
+                "fc_2.w_0_velocity_0",
+                "learning_rate_0",
+            },
         )

         self.assertEqual(
@@ -123,20 +121,18 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertIn('check_finite_and_unscale', ops)
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    "fc_1.b_0",
-                    "fc_2.b_0",
-                    "fc_2.w_0",
-                    "fc_1.b_0_velocity_0",
-                    "fc_2.b_0_velocity_0",
-                    "fc_2.w_0_velocity_0",
-                    "learning_rate_0",
-                    "loss_scaling_0",
-                    "num_bad_steps_0",
-                    "num_good_steps_0",
-                ]
-            ),
+            {
+                "fc_1.b_0",
+                "fc_2.b_0",
+                "fc_2.w_0",
+                "fc_1.b_0_velocity_0",
+                "fc_2.b_0_velocity_0",
+                "fc_2.w_0_velocity_0",
+                "learning_rate_0",
+                "loss_scaling_0",
+                "num_bad_steps_0",
+                "num_good_steps_0",
+            },
        )

        self.assertEqual(
@@ -232,17 +228,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertIn('subprog', ''.join(vars))
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    "fc_1.b_0",
-                    "fc_2.b_0",
-                    "fc_2.w_0",
-                    "fc_1.b_0_velocity_0",
-                    "fc_2.b_0_velocity_0",
-                    "fc_2.w_0_velocity_0",
-                    "learning_rate_0",
-                ]
-            ),
+            {
+                "fc_1.b_0",
+                "fc_2.b_0",
+                "fc_2.w_0",
+                "fc_1.b_0_velocity_0",
+                "fc_2.b_0_velocity_0",
+                "fc_2.w_0_velocity_0",
+                "learning_rate_0",
+            },
         )

         self.assertEqual(
@@ -322,20 +316,18 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    "fc_1.b_0",
-                    "fc_2.b_0",
-                    "fc_2.w_0",
-                    "fc_1.b_0_velocity_0",
-                    "fc_2.b_0_velocity_0",
-                    "fc_2.w_0_velocity_0",
-                    "learning_rate_0",
-                    "loss_scaling_0",
-                    "num_bad_steps_0",
-                    "num_good_steps_0",
-                ]
-            ),
+            {
+                "fc_1.b_0",
+                "fc_2.b_0",
+                "fc_2.w_0",
+                "fc_1.b_0_velocity_0",
+                "fc_2.b_0_velocity_0",
+                "fc_2.w_0_velocity_0",
+                "learning_rate_0",
+                "loss_scaling_0",
+                "num_bad_steps_0",
+                "num_good_steps_0",
+            },
         )

         self.assertEqual(
             ops,
@@ -448,23 +440,21 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    'fc_2.b_0',
-                    'num_good_steps_0',
-                    'fc_2.w_0',
-                    'loss_scaling_0',
-                    'num_bad_steps_0',
-                    'fc_2.w_0_velocity_0',
-                    'fc_2.w_0.asp_mask',
-                    'learning_rate_0',
-                    'fc_1.b_0',
-                    'fc_1.w_0.asp_mask',
-                    'fc_0.w_0.asp_mask',
-                    'fc_1.b_0_velocity_0',
-                    'fc_2.b_0_velocity_0',
-                ]
-            ),
+            {
+                'fc_2.b_0',
+                'num_good_steps_0',
+                'fc_2.w_0',
+                'loss_scaling_0',
+                'num_bad_steps_0',
+                'fc_2.w_0_velocity_0',
+                'fc_2.w_0.asp_mask',
+                'learning_rate_0',
+                'fc_1.b_0',
+                'fc_1.w_0.asp_mask',
+                'fc_0.w_0.asp_mask',
+                'fc_1.b_0_velocity_0',
+                'fc_2.b_0_velocity_0',
+            },
         )

         self.assertEqual(
             ops,
@@ -563,17 +553,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertIn('@BroadCast', ''.join(vars))
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    "fc_1.b_0",
-                    "fc_2.b_0",
-                    "fc_2.w_0",
-                    "fc_1.b_0_velocity_0",
-                    "fc_2.b_0_velocity_0",
-                    "fc_2.w_0_velocity_0",
-                    "learning_rate_0",
-                ]
-            ),
+            {
+                "fc_1.b_0",
+                "fc_2.b_0",
+                "fc_2.w_0",
+                "fc_1.b_0_velocity_0",
+                "fc_2.b_0_velocity_0",
+                "fc_2.w_0_velocity_0",
+                "learning_rate_0",
+            },
         )

         self.assertEqual(
@@ -650,17 +638,15 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
         self.assertIn('@BroadCast', ''.join(vars))
         self.assertEqual(
             set(parameters),
-            set(
-                [
-                    "fc_1.b_0",
-                    "fc_2.b_0",
-                    "fc_2.w_0",
-                    "fc_1.b_0_velocity_0",
-                    "fc_2.b_0_velocity_0",
-                    "fc_2.w_0_velocity_0",
-                    "learning_rate_0",
-                ]
-            ),
+            {
+                "fc_1.b_0",
+                "fc_2.b_0",
+                "fc_2.w_0",
+                "fc_1.b_0_velocity_0",
+                "fc_2.b_0_velocity_0",
+                "fc_2.w_0_velocity_0",
+                "learning_rate_0",
+            },
         )

         self.assertEqual(
python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py
@@ -238,12 +238,12 @@ class TestNameVisitor(unittest.TestCase):
             for_loop_dufunc_with_listcomp,
         ]
         self.loop_var_names = [
-            set(["i", "x"]),
-            set(["i", "ret", "max_len"]),
-            set(["i", "x"]),
-            set(["j", "array", "res", "x"]),
+            {"i", "x"},
+            {"i", "ret", "max_len"},
+            {"i", "x"},
+            {"j", "array", "res", "x"},
         ]
-        self.create_var_names = [set(), set(["ret"]), set(), set(["res", "x"])]
+        self.create_var_names = [set(), {"ret"}, set(), {"res", "x"}]

         self.nested_for_loop_func = nested_for_loop_dyfunc
@@ -269,11 +269,11 @@ class TestNameVisitor(unittest.TestCase):
         name_visitor = NameVisitor(gast_root)
         self.loop_var_names = [
-            set(["j", "two"]),
-            set(["i", "three", "b"]),
-            set(["i"]),
+            {"j", "two"},
+            {"i", "three", "b"},
+            {"i"},
         ]
-        self.create_var_names = [set(), set(["b"]), set()]
+        self.create_var_names = [set(), {"b"}, set()]

         i = 0
         for node in gast.walk(gast_root):
python/paddle/fluid/tests/unittests/ir/inference/inference_pass_test.py
@@ -46,7 +46,7 @@ class InferencePassTest(unittest.TestCase):
         random.seed(1)

     def _get_place(self):
-        return set([False, core.is_compiled_with_cuda()])
+        return {False, core.is_compiled_with_cuda()}

     def _save_models(
         self, dirname, feeded_var_names, target_vars, executor, program, scope
python/paddle/fluid/tests/unittests/ir/test_convert_to_mixed_precision.py
@@ -53,7 +53,7 @@ class TestConvertToMixedPrecision(unittest.TestCase):
             PrecisionType.Bfloat16,
         ]
         keep_io_types_options = [True, False, False, True]
-        black_list_options = [set(), set(), set(['conv2d']), set()]
+        black_list_options = [set(), set(), {'conv2d'}, set()]

         test_configs = zip(
             mixed_precision_options, keep_io_types_options, black_list_options
python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
@@ -206,7 +206,7 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op):
             core.CPUPlace(),
             ["Input"],
             "Output",
-            set(['Filter']),
+            {'Filter'},
             user_defined_grads=[dx],
             user_defined_grad_outputs=[convert_float_to_uint16(dout)],
         )
@@ -222,7 +222,7 @@ class TestConv2DWithGradBF16Op(TestConv2DBF16Op):
             core.CPUPlace(),
             ["Filter"],
             "Output",
-            set(['Input']),
+            {'Input'},
             user_defined_grads=[dweights],
             user_defined_grad_outputs=[convert_float_to_uint16(dout)],
         )
python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py
@@ -79,7 +79,7 @@ class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
         )
         x_grad = x_grad / np.prod(self.outputs['Out'].shape)
         self.check_grad_with_place(
-            core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[x_grad]
+            core.CPUPlace(), {'X'}, 'Out', user_defined_grads=[x_grad]
         )
python/paddle/fluid/tests/unittests/prim_op_test.py
@@ -164,7 +164,7 @@ class OpTestUtils:
         if api_params == []:
             results.append(input_arguments)
             return results
-        api_ignore_param_list = set(['name', 'dtype', 'out', 'output'])
+        api_ignore_param_list = {'name', 'dtype', 'out', 'output'}
         idx_of_op_proto_arguments = 0
         for idx, arg_name in enumerate(api_params):
             if arg_name in api_ignore_param_list:
python/paddle/fluid/tests/unittests/sequence/test_sequence_conv.py
@@ -179,7 +179,7 @@ class TestSeqProject(OpTest):
         self.check_grad(
             ['PaddingData'],
             'Out',
-            no_grad_set=set(['X', 'Filter']),
+            no_grad_set={'X', 'Filter'},
             check_dygraph=False,
         )
@@ -198,7 +198,7 @@ class TestSeqProject(OpTest):
             ['X', 'Filter'],
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['PaddingData']),
+            no_grad_set={'PaddingData'},
             check_dygraph=False,
         )
@@ -208,7 +208,7 @@ class TestSeqProject(OpTest):
             self.inputs_val_no_f,
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['Filter']),
+            no_grad_set={'Filter'},
             check_dygraph=False,
         )
@@ -218,7 +218,7 @@ class TestSeqProject(OpTest):
             self.inputs_val_no_x,
             'Out',
             max_relative_error=0.05,
-            no_grad_set=set(['X']),
+            no_grad_set={'X'},
             check_dygraph=False,
         )
python/paddle/fluid/tests/unittests/test_affine_channel_op.py
@@ -62,7 +62,7 @@ class TestAffineChannelOp(OpTest):
         self.check_grad(
             ['X'],
             'Out',
-            no_grad_set=set(['Scale', 'Bias']),
+            no_grad_set={'Scale', 'Bias'},
             check_dygraph=False,
         )
python/paddle/fluid/tests/unittests/test_backward.py
@@ -187,16 +187,14 @@ class TestBackward(unittest.TestCase):
 class SimpleNet(BackwardNet):
     def __init__(self):
         super().__init__()
-        self.stop_gradient_grad_vars = set(
-            [
-                'x_no_grad@GRAD',
-                'x2_no_grad@GRAD',
-                'x3_no_grad@GRAD',
-                'label_no_grad@GRAD',
-            ]
-        )
+        self.stop_gradient_grad_vars = {
+            'x_no_grad@GRAD',
+            'x2_no_grad@GRAD',
+            'x3_no_grad@GRAD',
+            'label_no_grad@GRAD',
+        }
         self.no_grad_vars = set()
-        self.params_names = set(['w2v', 'fc_predict.b_0', 'fc_w'])
+        self.params_names = {'w2v', 'fc_predict.b_0', 'fc_w'}
         self.op_path = [
             'lookup_table_v2',
             'lookup_table_v2',  # embedding
python/paddle/fluid/tests/unittests/test_batch_norm_op.py
@@ -617,7 +617,7 @@ class TestBatchNormOpTraining(unittest.TestCase):
 class TestBatchNormOpTrainingCase1(TestBatchNormOpTraining):
     def init_test_case(self):
         self.use_global_stats = False
-        self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+        self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
         self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
@@ -641,7 +641,7 @@ class TestBatchNormOpTrainingCase2(TestBatchNormOpTraining):
 class TestBatchNormOpTrainingCase3(TestBatchNormOpTraining):
     def init_test_case(self):
         self.use_global_stats = False
-        self.no_grad_set = set(['x@GRAD'])
+        self.no_grad_set = {'x@GRAD'}
         self.fetch_list = ['y', 'mean', 'variance', 'scale@GRAD', 'bias@GRAD']
@@ -747,7 +747,7 @@ class TestBatchNormOpFreezeStatsAndScaleBiasTraining(
 ):
     def init_test_case(self):
         self.use_global_stats = True
-        self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+        self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
         self.fetch_list = ['y', 'mean', 'variance', 'x@GRAD']
python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -183,14 +183,14 @@ def create_test_cudnn_fp16_class(parent, grad_check=True):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )

         def test_check_grad_no_input(self):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                    place, ['Filter'], 'Output', no_grad_set={'Input'}
                 )

     cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16")
@@ -231,7 +231,7 @@ def create_test_cudnn_bf16_class(parent):
                 place,
                 ['Input'],
                 'Output',
-                no_grad_set=set(['Filter']),
+                no_grad_set={'Filter'},
                 user_defined_grads=[numeric_grads],
             )
@@ -242,7 +242,7 @@ def create_test_cudnn_bf16_class(parent):
                 place,
                 ['Filter'],
                 'Output',
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
                 user_defined_grads=[numeric_grads],
             )
@@ -307,14 +307,14 @@ def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )

         def test_check_grad_no_input(self):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place) and grad_check:
                 self.check_grad_with_place(
-                    place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                    place, ['Filter'], 'Output', no_grad_set={'Input'}
                 )

     def init_data_format(self):
@@ -506,7 +506,7 @@ class TestConv2DOp(OpTest):
             ['Input'],
             'Output',
             max_relative_error=0.02,
-            no_grad_set=set(['Filter']),
+            no_grad_set={'Filter'},
             check_dygraph=(not self.use_mkldnn),
         )
@@ -521,7 +521,7 @@ class TestConv2DOp(OpTest):
             place,
             ['Filter'],
             'Output',
-            no_grad_set=set(['Input']),
+            no_grad_set={'Input'},
             check_dygraph=(not self.use_mkldnn),
         )
@@ -826,7 +826,7 @@ class TestConv2DOp_v2(OpTest):
             ['Input'],
             'Output',
             max_relative_error=0.02,
-            no_grad_set=set(['Filter']),
+            no_grad_set={'Filter'},
             check_dygraph=(not self.use_mkldnn),
         )
@@ -839,7 +839,7 @@ class TestConv2DOp_v2(OpTest):
             place,
             ['Filter'],
             'Output',
-            no_grad_set=set(['Input']),
+            no_grad_set={'Input'},
             check_dygraph=(not self.use_mkldnn),
         )
python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py
@@ -228,24 +228,20 @@ class TestConv2DTransposeOp(OpTest):
                 ['Filter'],
                 'Output',
                 max_relative_error=0.02,
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
             )
         else:
-            self.check_grad(
-                ['Filter'], 'Output', no_grad_set=set(['Input'])
-            )
+            self.check_grad(['Filter'], 'Output', no_grad_set={'Input'})

     def test_check_grad_no_filter(self):
         if self.need_check_grad:
             if self.use_cudnn:
                 place = core.CUDAPlace(0)
                 self.check_grad_with_place(
-                    place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )
             else:
-                self.check_grad(
-                    ['Input'], 'Output', no_grad_set=set(['Filter'])
-                )
+                self.check_grad(['Input'], 'Output', no_grad_set={'Filter'})

     def test_check_grad(self):
         if self.need_check_grad:
@@ -253,13 +249,13 @@ class TestConv2DTransposeOp(OpTest):
                 place = core.CUDAPlace(0)
                 self.check_grad_with_place(
                     place,
-                    set(['Input', 'Filter']),
+                    {'Input', 'Filter'},
                     'Output',
                     max_relative_error=0.02,
                 )
             else:
                 self.check_grad(
-                    set(['Input', 'Filter']), 'Output', max_relative_error=0.02
+                    {'Input', 'Filter'}, 'Output', max_relative_error=0.02
                 )

     def init_test_case(self):
python/paddle/fluid/tests/unittests/test_conv3d_op.py
@@ -380,7 +380,7 @@ class TestConv3DOp(OpTest):
             ['Input'],
             'Output',
             max_relative_error=0.03,
-            no_grad_set=set(['Filter']),
+            no_grad_set={'Filter'},
             check_dygraph=(not self.use_mkldnn),
         )
@@ -394,7 +394,7 @@ class TestConv3DOp(OpTest):
             ['Filter'],
             'Output',
             max_relative_error=0.03,
-            no_grad_set=set(['Input']),
+            no_grad_set={'Input'},
             check_dygraph=(not self.use_mkldnn),
         )
@@ -694,7 +694,7 @@ class TestConv3DOp_2(OpTest):
             ['Input'],
             'Output',
             max_relative_error=0.03,
-            no_grad_set=set(['Filter']),
+            no_grad_set={'Filter'},
         )

     def test_check_grad_no_input(self):
@@ -706,7 +706,7 @@ class TestConv3DOp_2(OpTest):
             ['Filter'],
             'Output',
             max_relative_error=0.03,
-            no_grad_set=set(['Input']),
+            no_grad_set={'Input'},
         )

     def init_test_case(self):
python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py
@@ -206,13 +206,13 @@ class TestConv3DTransposeOp(OpTest):
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place,
-                set(['Input', 'Filter']),
+                {'Input', 'Filter'},
                 'Output',
                 max_relative_error=0.03,
             )
         else:
             self.check_grad(
-                set(['Input', 'Filter']), 'Output', max_relative_error=0.03
+                {'Input', 'Filter'}, 'Output', max_relative_error=0.03
             )

     def test_check_grad_no_filter(self):
@@ -223,14 +223,14 @@ class TestConv3DTransposeOp(OpTest):
                 ['Input'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Filter']),
+                no_grad_set={'Filter'},
             )
         elif self.check_no_filter:
             self.check_grad(
                 ['Input'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Filter']),
+                no_grad_set={'Filter'},
             )

     def test_check_grad_no_input(self):
@@ -241,14 +241,14 @@ class TestConv3DTransposeOp(OpTest):
                 ['Filter'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
             )
         elif self.check_no_input:
             self.check_grad(
                 ['Filter'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
             )

     def init_test_case(self):
python/paddle/fluid/tests/unittests/test_data_norm_op.py
@@ -276,7 +276,7 @@ class TestDataNormOp(OpTest):
         test check backward, check grad
         """
         # NODE(yjjiang11): This op will be deprecated.
-        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+        self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)


 class TestDataNormOpWithEnableScaleAndShift(OpTest):
@@ -340,7 +340,7 @@ class TestDataNormOpWithEnableScaleAndShift(OpTest):
         test check backward, check grad
         """
         # NODE(yjjiang11): This op will be deprecated.
-        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+        self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)


 class TestDataNormOpWithoutEnableScaleAndShift(OpTest):
@@ -399,7 +399,7 @@ class TestDataNormOpWithoutEnableScaleAndShift(OpTest):
         test check backward, check grad
         """
         # NODE(yjjiang11): This op will be deprecated.
-        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+        self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)


 class TestDataNormOpWithEnableScaleAndShift_1(OpTest):
@@ -463,7 +463,7 @@ class TestDataNormOpWithEnableScaleAndShift_1(OpTest):
         test check backward, check grad
         """
         # NODE(yjjiang11): This op will be deprecated.
-        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+        self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)


 class TestDataNormOpWithSlotDim(OpTest):
@@ -521,7 +521,7 @@ class TestDataNormOpWithSlotDim(OpTest):
         test check backward, check grad
         """
         # NODE(yjjiang11): This op will be deprecated.
-        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)
+        self.check_grad(['X'], 'Y', no_grad_set=set(), check_dygraph=False)


 class TestDataNormOpErrorr(unittest.TestCase):
python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py
@@ -202,7 +202,7 @@ class TestModulatedDeformableConvOp(OpTest):
             ['Input', 'Offset'],
             'Output',
             max_relative_error=0.1,
-            no_grad_set=set(['Filter']),
+            no_grad_set={'Filter'},
         )

     def init_test_case(self):
python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py
@@ -73,9 +73,7 @@ class TestFilterByInstagOp(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(
-            ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-        )
+        self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})


 """This is Test Case 2"""
@@ -119,9 +117,7 @@ class TestFilterByInstagOp2(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(
-            ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-        )
+        self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})


 """This is Test Case 3"""
@@ -162,9 +158,7 @@ class TestFilterByInstagOp3(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(
-            ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-        )
+        self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})


 """This is Test Case 4"""
@@ -204,9 +198,7 @@ class TestFilterByInstagOp4(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(
-            ['Ins'], 'Out', no_grad_set=set(['Ins_tag', 'Filter_tag'])
-        )
+        self.check_grad(['Ins'], 'Out', no_grad_set={'Ins_tag', 'Filter_tag'})


 class TestFilterByInstagOp6(OpTest):
python/paddle/fluid/tests/unittests/test_group_norm_op.py
@@ -126,7 +126,7 @@ class TestGroupNormOp(OpTest):
         self.op = create_op(
             self.scope, self.op_type, op_inputs, op_outputs, op_attrs
         )
-        inputs_to_check = set(['X', 'Scale', 'Bias'])
+        inputs_to_check = {'X', 'Scale', 'Bias'}
         output_names = 'Y'
         cpu_grads = self._get_gradient(
             inputs_to_check, place, output_names, None
@@ -148,12 +148,12 @@ class TestGroupNormOp(OpTest):
             return

         place = core.CPUPlace()
-        self.check_grad_with_place(place, set(['X', 'Scale', 'Bias']), 'Y')
+        self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place,
-                set(['X', 'Scale', 'Bias']),
+                {'X', 'Scale', 'Bias'},
                 'Y',
             )
@@ -187,7 +187,7 @@ class TestGroupNormFP16OP(TestGroupNormOp):
             return
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, set(['X', 'Scale', 'Bias']), 'Y')
+        self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')

     def init_test_case(self):
         self.dtype = np.float16
@@ -250,7 +250,7 @@ class TestGroupNormBF16Op(OpTest):
             return
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, set(['X', 'Scale', 'Bias']), 'Y')
+        self.check_grad_with_place(place, {'X', 'Scale', 'Bias'}, 'Y')

     def init_test_case(self):
         pass
python/paddle/fluid/tests/unittests/test_imperative_trace_non_persistable_inputs.py
@@ -48,13 +48,11 @@ class TestTracedLayerRecordNonPersistableInput(unittest.TestCase):
             learning_rate=1e-3, parameter_list=layer.parameters()
         )
-        expected_persistable_vars = set(
-            [
-                layer._linear.weight.name,
-                layer._linear.bias.name,
-                layer._offset.name,
-            ]
-        )
+        expected_persistable_vars = {
+            layer._linear.weight.name,
+            layer._linear.bias.name,
+            layer._offset.name,
+        }

         for _ in range(10):
             in_x = fluid.dygraph.to_variable(
python/paddle/fluid/tests/unittests/test_infer_no_need_buffer_slots.py
@@ -60,7 +60,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
                 core.infer_no_need_buffer_slots(
                     op.type, inputs, outputs, attrs
                 ),
-                set([]),
+                set(),
             )
         elif idx == 1:
             # fill constant op
@@ -68,7 +68,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
                 core.infer_no_need_buffer_slots(
                     op.type, inputs, outputs, attrs
                 ),
-                set([]),
+                set(),
             )
         else:
             # elementwise_add_grad op
@@ -76,7 +76,7 @@ class TestInferNoNeedBufferSlots(unittest.TestCase):
                 core.infer_no_need_buffer_slots(
                     op.type, inputs, outputs, attrs
                 ),
-                set(['Y', 'X']),
+                {'Y', 'X'},
             )
python/paddle/fluid/tests/unittests/test_instance_norm_op.py
@@ -221,14 +221,14 @@ class TestInstanceNormOpTraining(unittest.TestCase):
 class TestInstanceNormOpTrainingCase1(TestInstanceNormOpTraining):
     def init_test_case(self):
         self.shape = [2, 3, 4, 5]
-        self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+        self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
         self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']


 class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining):
     def init_test_case(self):
         self.shape = [20, 50, 4, 5]
-        self.no_grad_set = set(['scale@GRAD', 'bias@GRAD'])
+        self.no_grad_set = {'scale@GRAD', 'bias@GRAD'}
         self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py
@@ -534,7 +534,7 @@ class TestCUDNNLstmOp(OpTest):
         for var_name in var_name_list:
             self.check_grad_with_place(
                 place,
-                set(['Input', var_name, 'InitH', 'InitC']),
+                {'Input', var_name, 'InitH', 'InitC'},
                 ['Out', 'LastH', 'LastC'],
             )
python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -414,7 +414,7 @@ def create_test_bf16_class(parent, atol=0.01):
                 place,
                 ['X'],
                 'Out',
-                no_grad_set=set(['Y']),
+                no_grad_set={'Y'},
                 user_defined_grads=[numeric_grads],
             )
@@ -425,7 +425,7 @@ def create_test_bf16_class(parent, atol=0.01):
                 place,
                 ['Y'],
                 'Out',
-                no_grad_set=set(['X']),
+                no_grad_set={'X'},
                 user_defined_grads=[numeric_grads],
             )
python/paddle/fluid/tests/unittests/test_multiplex_op.py
@@ -54,7 +54,7 @@ class TestMultiplexOp(OpTest):
         self.check_grad(['x2', 'x3', 'x4'], 'Out', no_grad_set=set('x1'))

     def test_check_grad_ignore_x1_x2(self):
-        self.check_grad(['x3', 'x4'], 'Out', no_grad_set=set(['x1', 'x2']))
+        self.check_grad(['x3', 'x4'], 'Out', no_grad_set={'x1', 'x2'})

     def test_check_grad_ignore_x3(self):
         self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3'))
python/paddle/fluid/tests/unittests/test_operator_desc.py
@@ -66,18 +66,16 @@ class TestOperator(unittest.TestCase):
         self.assertEqual(mul_op.output("Out"), ["mul.out"])
         self.assertEqual(
             set(mul_op.attr_names),
-            set(
-                [
-                    "x_num_col_dims",
-                    "y_num_col_dims",
-                    "op_role",
-                    "op_role_var",
-                    "op_namescope",
-                    "op_callstack",
-                    "op_device",
-                    "with_quant_attr",
-                ]
-            ),
+            {
+                "x_num_col_dims",
+                "y_num_col_dims",
+                "op_role",
+                "op_role_var",
+                "op_namescope",
+                "op_callstack",
+                "op_device",
+                "with_quant_attr",
+            },
         )
         self.assertEqual(mul_op.has_attr("x_num_col_dims"), True)
         self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT)
python/paddle/fluid/tests/unittests/test_pool2d_op.py
@@ -358,14 +358,14 @@ class TestPool2D_Op_Mixin:
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
                 place,
-                set(['X']),
+                {'X'},
                 'Out',
                 max_relative_error=0.07,
                 check_dygraph=(not self.use_mkldnn),
             )
         elif self.pool_type != "max":
             self.check_grad(
-                set(['X']),
+                {'X'},
                 'Out',
                 max_relative_error=0.07,
                 check_dygraph=(not self.use_mkldnn),
@@ -524,7 +524,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True):
             ):
                 self.check_grad_with_place(
                     place,
-                    set(['X']),
+                    {'X'},
                     'Out',
                     max_relative_error=0.07,
                     check_dygraph=(not self.use_mkldnn),
@@ -565,7 +565,7 @@ def create_test_fp16_class(parent, check_grad=True):
             ):
                 self.check_grad_with_place(
                     place,
-                    set(['X']),
+                    {'X'},
                     'Out',
                     max_relative_error=0.07,
                     check_dygraph=(not self.use_mkldnn),
@@ -864,10 +864,10 @@ class TestCase5_Max(TestCase2):
         if self.has_cudnn() and self.pool_type == "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
-                place, set(['X']), 'Out', max_relative_error=1.00
+                place, {'X'}, 'Out', max_relative_error=1.00
             )
         elif self.pool_type == "max":
-            self.check_grad(set(['X']), 'Out', max_relative_error=1.00)
+            self.check_grad({'X'}, 'Out', max_relative_error=1.00)


 class TestCase5_channel_last_Max(TestCase5_Max):
python/paddle/fluid/tests/unittests/test_pool3d_op.py
@@ -339,15 +339,15 @@ class TestPool3D_Op(OpTest):
             place = core.CUDAPlace(0)
             if core.is_compiled_with_rocm():
                 self.check_grad_with_place(
-                    place, set(['X']), 'Out', max_relative_error=1e-2
+                    place, {'X'}, 'Out', max_relative_error=1e-2
                 )
             else:
-                self.check_grad_with_place(place, set(['X']), 'Out')
+                self.check_grad_with_place(place, {'X'}, 'Out')
         elif self.pool_type != "max":
             if core.is_compiled_with_rocm():
-                self.check_grad(set(['X']), 'Out', max_relative_error=1e-2)
+                self.check_grad({'X'}, 'Out', max_relative_error=1e-2)
             else:
-                self.check_grad(set(['X']), 'Out')
+                self.check_grad({'X'}, 'Out')

     def init_data_format(self):
         self.data_format = "NCDHW"
@@ -783,10 +783,10 @@ class TestCase5_Max(TestCase2):
         if self.has_cudnn() and self.pool_type == "max":
             place = core.CUDAPlace(0)
             self.check_grad_with_place(
-                place, set(['X']), 'Out', max_relative_error=1.00
+                place, {'X'}, 'Out', max_relative_error=1.00
             )
         elif self.pool_type == "max":
-            self.check_grad(set(['X']), 'Out', max_relative_error=1.00)
+            self.check_grad({'X'}, 'Out', max_relative_error=1.00)


 class TestCase5_channel_last_Max(TestCase5_Max):
python/paddle/fluid/tests/unittests/test_pool_max_op.py
@@ -163,7 +163,7 @@ class TestMaxPoolWithIndex_Op(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(set(['X']), ['Out'])
+        self.check_grad({'X'}, ['Out'])

     def init_test_case(self):
         self.op_type = "max_pool3d_with_index"
python/paddle/fluid/tests/unittests/test_save_inference_model_conditional_op.py
@@ -91,16 +91,14 @@ class TestConditionalOp(unittest.TestCase):
         model_file = os.path.join(root_path.name, "while_net")
         paddle.jit.save(net, model_file)

-        right_pdmodel = set(
-            [
-                "uniform_random",
-                "shape",
-                "slice",
-                "not_equal",
-                "while",
-                "elementwise_add",
-            ]
-        )
+        right_pdmodel = {
+            "uniform_random",
+            "shape",
+            "slice",
+            "not_equal",
+            "while",
+            "elementwise_add",
+        }
         paddle.enable_static()
         pdmodel = getModelOp(model_file + ".pdmodel")
         self.assertTrue(
@@ -119,16 +117,14 @@ class TestConditionalOp(unittest.TestCase):
         model_file = os.path.join(root_path.name, "for_net")
         paddle.jit.save(net, model_file)

-        right_pdmodel = set(
-            [
-                "randint",
-                "fill_constant",
-                "cast",
-                "less_than",
-                "while",
-                "elementwise_add",
-            ]
-        )
+        right_pdmodel = {
+            "randint",
+            "fill_constant",
+            "cast",
+            "less_than",
+            "while",
+            "elementwise_add",
+        }
         paddle.enable_static()
         pdmodel = getModelOp(model_file + ".pdmodel")
         self.assertTrue(
@@ -147,16 +143,14 @@ class TestConditionalOp(unittest.TestCase):
         model_file = os.path.join(root_path.name, "if_net")
         paddle.jit.save(net, model_file)

-        right_pdmodel = set(
-            [
-                "assign_value",
-                "greater_than",
-                "cast",
-                "conditional_block",
-                "logical_not",
-                "select_input",
-            ]
-        )
+        right_pdmodel = {
+            "assign_value",
+            "greater_than",
+            "cast",
+            "conditional_block",
+            "logical_not",
+            "select_input",
+        }
         paddle.enable_static()
         pdmodel = getModelOp(model_file + ".pdmodel")
         self.assertTrue(
python/paddle/fluid/tests/unittests/test_spectral_norm_op.py
@@ -115,7 +115,7 @@ class TestSpectralNormOp(TestSpectralNormOpNoGrad):
         self.check_grad(
             ['Weight'],
             'Out',
-            no_grad_set=set(["U", "V"]),
+            no_grad_set={"U", "V"},
         )

     def initTestCase(self):
python/paddle/fluid/tests/unittests/test_strided_slice_op.py
@@ -99,7 +99,7 @@ class TestStrideSliceOp(OpTest):
         self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(set(['Input']), 'Out', check_eager=True)
+        self.check_grad({'Input'}, 'Out', check_eager=True)

     def initTestCase(self):
         self.input = np.random.rand(100)
python/paddle/fluid/tests/unittests/test_top_k_op.py
@@ -57,7 +57,7 @@ class TestTopkOp(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(set(['X']), 'Out')
+        self.check_grad({'X'}, 'Out')


 if __name__ == "__main__":
python/paddle/fluid/tests/unittests/xpu/test_affine_channel_op_xpu.py
@@ -79,7 +79,7 @@ class TestAffineChannelOp(XPUOpTest):
         paddle.enable_static()
         place = paddle.XPUPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', no_grad_set=set(['Scale', 'Bias'])
+            place, ['X'], 'Out', no_grad_set={'Scale', 'Bias'}
         )

     def init_test_case(self):
python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py
@@ -276,7 +276,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
             if core.is_compiled_with_xpu():
                 paddle.enable_static()
                 self.check_grad_with_place(
-                    self.place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    self.place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )

         def test_check_grad_no_input(self):
@@ -285,7 +285,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
             if core.is_compiled_with_xpu():
                 paddle.enable_static()
                 self.check_grad_with_place(
-                    self.place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                    self.place, ['Filter'], 'Output', no_grad_set={'Input'}
                 )

         def init_test_case(self):
@@ -440,7 +440,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
             if core.is_compiled_with_xpu():
                 paddle.enable_static()
                 self.check_grad_with_place(
-                    self.place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                    self.place, ['Input'], 'Output', no_grad_set={'Filter'}
                 )

         def test_check_grad_no_input(self):
@@ -450,7 +450,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
             if core.is_compiled_with_xpu():
                 paddle.enable_static()
                 self.check_grad_with_place(
-                    self.place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                    self.place, ['Filter'], 'Output', no_grad_set={'Input'}
                 )

         def init_test_case(self):
python/paddle/fluid/tests/unittests/xpu/test_conv2d_transpose_op_xpu.py
@@ -192,19 +192,19 @@ class XPUTestConv2DTransposeOp(XPUOpTestWrapper):
     def test_check_grad_no_input(self):
         if self.need_check_grad:
             self.check_grad_with_place(
-                self.place, ['Filter'], 'Output', no_grad_set=set(['Input'])
+                self.place, ['Filter'], 'Output', no_grad_set={'Input'}
             )

     def test_check_grad_no_filter(self):
         if self.need_check_grad:
             self.check_grad_with_place(
-                self.place, ['Input'], 'Output', no_grad_set=set(['Filter'])
+                self.place, ['Input'], 'Output', no_grad_set={'Filter'}
             )

     def test_check_grad(self):
         if self.need_check_grad:
             self.check_grad_with_place(
-                self.place, set(['Input', 'Filter']), 'Output'
+                self.place, {'Input', 'Filter'}, 'Output'
             )

     def init_test_case(self):
python/paddle/fluid/tests/unittests/xpu/test_conv3d_op_xpu.py
@@ -275,7 +275,7 @@ class XPUTestConv3DOp(XPUOpTestWrapper):
                 ['Input'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Filter']),
+                no_grad_set={'Filter'},
             )

         def test_check_grad_no_input(self):
@@ -286,7 +286,7 @@ class XPUTestConv3DOp(XPUOpTestWrapper):
                 ['Filter'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
             )

         def init_test_case(self):
@@ -445,7 +445,7 @@ class XPUTestConv3DOp_v2(XPUOpTestWrapper):
                 ['Input'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Filter']),
+                no_grad_set={'Filter'},
             )

         def test_check_grad_no_input(self):
@@ -455,7 +455,7 @@ class XPUTestConv3DOp_v2(XPUOpTestWrapper):
                 ['Filter'],
                 'Output',
                 max_relative_error=0.03,
-                no_grad_set=set(['Input']),
+                no_grad_set={'Input'},
             )

         def init_test_case(self):
python/paddle/fluid/tests/unittests/xpu/test_instance_norm_op_xpu.py
@@ -145,27 +145,27 @@ class XPUTestInstanceNormOp(XPUOpTestWrapper):
     class TestXPUInstanceNormOp6(XPUTestInstanceNormOp):
         def set_attrs(self):
             self.shape = [10, 12, 32, 32]
-            self.no_grad_set = set(['Scale', 'Bias'])
+            self.no_grad_set = {'Scale', 'Bias'}

     class TestXPUInstanceNormOp7(XPUTestInstanceNormOp):
         def set_attrs(self):
             self.shape = [4, 5, 6, 7]
-            self.no_grad_set = set(['Scale', 'Bias'])
+            self.no_grad_set = {'Scale', 'Bias'}

     class TestXPUInstanceNormOp8(XPUTestInstanceNormOp):
         def set_attrs(self):
             self.shape = [1, 8, 16, 16]
-            self.no_grad_set = set(['Scale', 'Bias'])
+            self.no_grad_set = {'Scale', 'Bias'}

     class TestXPUInstanceNormOp9(XPUTestInstanceNormOp):
         def set_attrs(self):
             self.shape = [4, 16, 256, 128]
-            self.no_grad_set = set(['Scale', 'Bias'])
+            self.no_grad_set = {'Scale', 'Bias'}

     class TestXPUInstanceNormOp10(XPUTestInstanceNormOp):
         def set_attrs(self):
             self.shape = [10, 3, 512, 1]
-            self.no_grad_set = set(['Scale', 'Bias'])
+            self.no_grad_set = {'Scale', 'Bias'}

     class TestInstanceNormOpError(XPUOpTest):
         def setUp(self):
python/paddle/fluid/tests/unittests/xpu/test_kldiv_loss_op_xpu.py
@@ -79,8 +79,8 @@ class XPUTestKLDivLossOp(XPUOpTestWrapper):
             paddle.XPUPlace(0),
             ['X'],
             'Loss',
-            no_grad_set=set(["Target"]),
-            check_dygraph=True,
+            no_grad_set={"Target"},
+            check_eager=True,
         )

     def initTestCase(self):
python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py
@@ -346,7 +346,7 @@ class XPUTestPool2D_Op(XPUOpTestWrapper):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        self.check_grad_with_place(self.place, set(['X']), 'Out')
+        self.check_grad_with_place(self.place, {'X'}, 'Out')

     def init_data_format(self):
         self.data_format = "NCHW"
python/paddle/fluid/tests/unittests/xpu/test_pool3d_op_xpu.py
@@ -344,7 +344,7 @@ class XPUTestPool3DOp(XPUOpTestWrapper):
             return
         place = paddle.XPUPlace(0)
-        self.check_grad_with_place(place, set(['X']), 'Out')
+        self.check_grad_with_place(place, {'X'}, 'Out')

     def init_data_format(self):
         self.data_format = "NCDHW"
@@ -532,7 +532,7 @@ class XPUTestPool3DOp(XPUOpTestWrapper):
         if self.dtype == np.float16:
             return
         place = paddle.XPUPlace(0)
-        self.check_grad_with_place(place, set(['X']), 'Out')
+        self.check_grad_with_place(place, {'X'}, 'Out')


 support_types = get_xpu_op_support_types('pool3d')
python/paddle/fluid/tests/unittests/xpu/test_pool_max_op_xpu.py
@@ -112,7 +112,7 @@ class XPUTestPoolWithIndex_op(XPUOpTestWrapper):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        self.check_grad_with_place(self.place, set(['X']), ['Out'])
+        self.check_grad_with_place(self.place, {'X'}, ['Out'])

     def init_test_case(self):
         self.pool_forward_naive = max_pool2D_forward_naive
python/paddle/fluid/tests/unittests/xpu/test_sequence_conv_op_xpu.py
@@ -178,7 +178,7 @@ class XPUTestSequenceConv(XPUOpTestWrapper):
     def test_check_grad_padding_data(self):
         if self.padding_trainable:
             self.check_grad(
-                ['PaddingData'], 'Out', no_grad_set=set(['X', 'Filter'])
+                ['PaddingData'], 'Out', no_grad_set={'X', 'Filter'}
             )

     def test_check_grad_Filter(self):
@@ -189,20 +189,18 @@ class XPUTestSequenceConv(XPUOpTestWrapper):
     def test_check_grad_input_filter(self):
         if self.padding_trainable:
             self.check_grad(
-                ['X', 'Filter'], 'Out', no_grad_set=set(['PaddingData'])
+                ['X', 'Filter'], 'Out', no_grad_set={'PaddingData'}
             )

     def test_check_grad_padding_input(self):
         if self.padding_trainable:
             self.check_grad(
-                self.inputs_val_no_f, 'Out', no_grad_set=set(['Filter'])
+                self.inputs_val_no_f, 'Out', no_grad_set={'Filter'}
             )

     def test_check_grad_padding_filter(self):
         if self.padding_trainable:
-            self.check_grad(
-                self.inputs_val_no_x, 'Out', no_grad_set=set(['X'])
-            )
+            self.check_grad(self.inputs_val_no_x, 'Out', no_grad_set={'X'})

     def init_test_case(self):
         self.input_row = 7
python/paddle/jit/dy2static/basic_api_transformer.py
@@ -197,11 +197,9 @@ class AttributeJstTransformer(BaseTransformer):
         assert isinstance(
             node, gast.AST
         ), "Input non-gast.AST node for the initialization of ToTensorTransformer."
-        self.interested_name = set(
-            [
-                'size',
-            ]
-        )
+        self.interested_name = {
+            'size',
+        }
         self.root = node

     def transform(self):
python/paddle/jit/dy2static/ifelse_transformer.py
@@ -288,7 +288,7 @@ class NameVisitor(gast.NodeVisitor):
         return new_name_ids

     def _is_call_func_name_node(self, node):
-        white_func_names = set(['append', 'extend'])
+        white_func_names = {'append', 'extend'}
         if len(self.ancestor_nodes) > 1:
             assert self.ancestor_nodes[-1] == node
             parent_node = self.ancestor_nodes[-2]
python/paddle/static/quantization/quant2_int8_mkldnn_pass.py
@@ -62,7 +62,7 @@ class Quant2Int8MkldnnPass:
         ]
         self._ops_to_quantize = _ops_to_quantize
         self._op_ids_to_skip = (
-            _op_ids_to_skip if _op_ids_to_skip is not None else set([-1])
+            _op_ids_to_skip if _op_ids_to_skip is not None else {-1}
         )
         self._scale_immutable_ops = [
             'transpose2',
python/paddle/static/quantization/tests/quant2_int8_image_classification_comparison.py
@@ -387,7 +387,7 @@ class Quant2Int8ImageClassificationComparisonTest(unittest.TestCase):
             test_case_args.ops_to_quantize
         )
-        self._op_ids_to_skip = set([-1])
+        self._op_ids_to_skip = {-1}
         if test_case_args.op_ids_to_skip:
             self._op_ids_to_skip = self._ints_from_csv(
                 test_case_args.op_ids_to_skip
python/paddle/static/quantization/tests/quant2_int8_nlp_comparison.py
@@ -325,7 +325,7 @@ class QuantInt8NLPComparisonTest(unittest.TestCase):
             test_case_args.ops_to_quantize
         )
-        self._op_ids_to_skip = set([-1])
+        self._op_ids_to_skip = {-1}
        if test_case_args.op_ids_to_skip:
            self._op_ids_to_skip = self._ints_from_csv(
                test_case_args.op_ids_to_skip
python/paddle/utils/cpp_extension/extension_utils.py
@@ -887,7 +887,7 @@ def add_compile_flag(extra_compile_args, flags):
 def is_cuda_file(path):
-    cuda_suffix = set(['.cu'])
+    cuda_suffix = {'.cu'}
     items = os.path.splitext(path)
     assert len(items) > 1
     return items[-1] in cuda_suffix
tools/check_op_register_type.py
@@ -25,8 +25,8 @@ import sys
 from paddle import fluid

-INTS = set(['int', 'int64_t'])
-FLOATS = set(['float', 'double'])
+INTS = {'int', 'int64_t'}
+FLOATS = {'float', 'double'}


 def get_all_kernels():
tools/print_signatures.py
@@ -192,7 +192,7 @@ def insert_api_into_dict(full_name, gen_doc_anno=None):
         api_info_dict[fc_id]["all_names"].add(full_name)
     else:
         api_info_dict[fc_id] = {
-            "all_names": set([full_name]),
+            "all_names": {full_name},
             "id": fc_id,
             "object": obj,
             "type": type(obj).__name__,
tools/sampcd_processor.py
@@ -251,7 +251,7 @@ def is_required_match(requirestr, cbtitle='not-specified'):
     None - skipped  # trick
     """
     global SAMPLE_CODE_TEST_CAPACITY, RUN_ON_DEVICE  # readonly
-    requires = set(['cpu'])
+    requires = {'cpu'}
     if requirestr:
         for r in requirestr.split(','):
             rr = r.strip().lower()
tools/test_print_signatures.py
@@ -77,7 +77,7 @@ class Test_is_primitive(unittest.TestCase):
         self.assertTrue(is_primitive(set()))
         self.assertTrue(is_primitive([1, 2]))
         self.assertTrue(is_primitive((1.1, 2.2)))
-        self.assertTrue(is_primitive(set([1, 2.3])))
+        self.assertTrue(is_primitive({1, 2.3}))
         self.assertFalse(is_primitive(range(3)))
         self.assertFalse(is_primitive({}))
         self.assertFalse(is_primitive([1, 1j]))