Unverified commit 2c543193, authored Mar 20, 2023 by Vvsmile, committed by GitHub on Mar 20, 2023
Adjust tolerance with modi grad (#51791)
Parent: ac47d003

Showing 2 changed files with 167 additions and 20 deletions (+167 −20)
python/paddle/fluid/tests/unittests/eager_op_test.py: +165 −19
python/paddle/fluid/tests/unittests/op_test.py: +2 −1
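Taken together, the two files change OpTest so that float16 operators are checked against a float32 reference computation, with the relative tolerance chosen from the output dtype (the new 1e-3 branch for float16). A minimal standalone sketch of that tolerance rule, using illustrative names that are not part of the test framework:

import numpy as np

def pick_rtol(actual_np):
    # Mirrors convert_uint16_to_float_ifneed in the diff below: bfloat16 outputs
    # (stored as uint16) use 1e-2, float16 uses the new 1e-3 branch,
    # and everything else keeps the default 1e-5.
    if actual_np.dtype == np.uint16:
        return 1.0e-2
    elif actual_np.dtype == np.float16:
        return 1.0e-3
    return 1.0e-5

actual = np.linspace(0.1, 1.0, 8).astype(np.float16)  # made-up fp16 output
expect = actual.astype(np.float32)                     # stand-in fp32 reference
assert np.allclose(actual.astype(np.float32), expect, rtol=pick_rtol(actual))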
python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -332,6 +332,7 @@ class OpTest(unittest.TestCase):
         cls.dtype = None
         cls.outputs = {}
         cls.input_shape_is_large = True
+        cls.is_calc_ref = False
         cls.check_prim = False
         np.random.seed(123)
@@ -463,6 +464,7 @@ class OpTest(unittest.TestCase):
         # Make sure this function is called after calling infer_dtype_from_inputs_outputs.
         return (
             self.dtype == np.float16
             or self.dtype == "float16"
             or (
                 hasattr(self, 'output_dtype')
                 and self.output_dtype == np.float16
@@ -492,6 +494,18 @@ class OpTest(unittest.TestCase):
             and self.attrs["use_xpu"]
         )

+    def is_fp16_compared_with_fp32(self):
+        return self.is_float16_op() and (
+            self.op_type
+            not in op_accuracy_white_list.NO_FP16_COMPARED_WITH_FP32_OP_LIST
+        )
+
+    def enable_cal_ref_output(self):
+        self.is_calc_ref = self.is_fp16_compared_with_fp32()
+
+    def disable_cal_ref_output(self):
+        self.is_calc_ref = False
+
     # set the self.output_dtype .
     def infer_dtype_from_inputs_outputs(self, inputs, outputs):
         def is_np_data(input):
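These three methods are the switch for the reference pass: enable_cal_ref_output turns on is_calc_ref only for float16 ops that are not on the whitelist, and the feed/var-creation paths further down promote float16 data to float32 while the flag is set. A self-contained toy showing the intended on/off behaviour (the class and attribute names below are made up for illustration, only the three methods mirror the diff):

class TinyOpTest:
    # Toy stand-in for OpTest: only the enable/disable pattern is mirrored.
    NO_FP16_COMPARED_WITH_FP32_OP_LIST = {"excluded_op"}

    def __init__(self, op_type, dtype):
        self.op_type = op_type
        self.dtype = dtype
        self.is_calc_ref = False

    def is_float16_op(self):
        return self.dtype == "float16"

    def is_fp16_compared_with_fp32(self):
        return self.is_float16_op() and (
            self.op_type not in self.NO_FP16_COMPARED_WITH_FP32_OP_LIST
        )

    def enable_cal_ref_output(self):
        self.is_calc_ref = self.is_fp16_compared_with_fp32()

    def disable_cal_ref_output(self):
        self.is_calc_ref = False

t = TinyOpTest("softmax", "float16")
t.enable_cal_ref_output()
assert t.is_calc_ref      # fp16 op, not whitelisted -> reference pass enabled
t.disable_cal_ref_output()
assert not t.is_calc_ref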
@@ -565,7 +579,21 @@ class OpTest(unittest.TestCase):
                     tensor = core.LoDTensor()
                     if isinstance(np_value, tuple):
                         tensor.set(np_value[0], place)
-                        tensor.set_recursive_sequence_lengths(np_value[1])
+                        dtype = np.array(np_value[1]).dtype
+                        if self.is_calc_ref and dtype == np.float16:
+                            if isinstance(np_value[1], list):
+                                tensor.set_recursive_sequence_lengths(
+                                    np.array(np_value[1]).astype(np.float32)
+                                )
+                            else:
+                                tensor.set_recursive_sequence_lengths(
+                                    np_value[1].astype(np.float32)
+                                )
+                        else:
+                            tensor.set_recursive_sequence_lengths(np_value[1])
                     else:
-                        tensor.set(np_value, place)
+                        if self.is_calc_ref and np_value.dtype == np.float16:
+                            tensor.set(np_value.astype(np.float32), place)
+                        else:
+                            tensor.set(np_value, place)
                     feed_map[name] = tensor
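The rule added here is that, while is_calc_ref is set, any float16 feed array is promoted to float32 before being written into the LoDTensor, so the reference pass runs the same data at higher precision. A small sketch of just that promotion step (maybe_promote is an illustrative helper, not Paddle API):

import numpy as np

def maybe_promote(np_value, is_calc_ref):
    # While the reference pass is active, fp16 feed data is cast up to fp32;
    # otherwise it is fed unchanged.
    if is_calc_ref and np_value.dtype == np.float16:
        return np_value.astype(np.float32)
    return np_value

x = np.ones((2, 3), dtype=np.float16)
assert maybe_promote(x, is_calc_ref=True).dtype == np.float32
assert maybe_promote(x, is_calc_ref=False).dtype == np.float16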
@@ -573,9 +601,25 @@ class OpTest(unittest.TestCase):
                 tensor = core.LoDTensor()
                 if isinstance(self.inputs[var_name], tuple):
                     tensor.set(self.inputs[var_name][0], place)
-                    tensor.set_recursive_sequence_lengths(
-                        self.inputs[var_name][1]
-                    )
+                    if (
+                        self.is_calc_ref
+                        and self.inputs[var_name][1].dtype == np.float16
+                    ):
+                        tensor.set_recursive_sequence_lengths(
+                            self.inputs[var_name][1].astype(np.float32)
+                        )
+                    else:
+                        tensor.set_recursive_sequence_lengths(
+                            self.inputs[var_name][1]
+                        )
                 else:
-                    tensor.set(self.inputs[var_name], place)
+                    if (
+                        self.is_calc_ref
+                        and self.inputs[var_name].dtype == np.float16
+                    ):
+                        tensor.set(
+                            self.inputs[var_name].astype(np.float32), place
+                        )
+                    else:
+                        tensor.set(self.inputs[var_name], place)
                 feed_map[var_name] = tensor
@@ -601,10 +645,10 @@ class OpTest(unittest.TestCase):
         else:
             self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
         inputs = append_input_output(
-            block, op_proto, self.inputs, True, self.dtype
+            block, op_proto, self.inputs, True, self.dtype, self.is_calc_ref
         )
         outputs = append_input_output(
-            block, op_proto, self.outputs, False, self.dtype
+            block, op_proto, self.outputs, False, self.dtype, self.is_calc_ref
         )
         if hasattr(self, "cache_name_list"):
@@ -724,7 +768,13 @@ class OpTest(unittest.TestCase):
     def append_input_output_for_dygraph(
         self, op_proto, np_list, is_input, if_return_inputs_grad_dict, block
     ):
-        def create_var(np_value, name, is_input, if_return_inputs_grad_dict):
+        def create_var(
+            np_value,
+            name,
+            is_input,
+            if_return_inputs_grad_dict,
+            is_calc_ref=False,
+        ):
             np_value_temp = np_value
             has_lod = False
             lod_temp = None
@@ -734,7 +784,13 @@ class OpTest(unittest.TestCase):
                 lod_temp = np_value[1]

             if is_input:
-                v = self._create_var_from_numpy(np_value_temp)
+                if self.is_calc_ref and np_value_temp.dtype == np.float16:
+                    v = self._create_var_from_numpy(
+                        np_value_temp.astype(np.float32)
+                    )
+                else:
+                    v = self._create_var_from_numpy(np_value_temp)
                 if if_return_inputs_grad_dict:
                     v.stop_gradient = False
                     v.retain_grads()
@@ -743,6 +799,15 @@ class OpTest(unittest.TestCase):
                     v.value().get_tensor().set_recursive_sequence_lengths(
                         lod_temp
                     )
             else:
+                if self.is_calc_ref and np_value_temp.dtype == np.float16:
+                    v = block.create_var(
+                        name=name,
+                        dtype=np.float32,
+                        type=core.VarDesc.VarType.LOD_TENSOR,
+                        persistable=False,
+                        stop_gradient=False,
+                    )
+                else:
                     v = block.create_var(
                         name=name,
@@ -779,7 +844,11 @@ class OpTest(unittest.TestCase):
                 slot_name = name
                 for (name, np_value) in np_list[name]:
                     v = create_var(
-                        np_value, name, is_input, if_return_inputs_grad_dict
+                        np_value,
+                        name,
+                        is_input,
+                        if_return_inputs_grad_dict,
+                        self.is_calc_ref,
                     )
                     var_list.append(v)
                     if if_return_inputs_grad_dict:
@@ -799,6 +868,7 @@ class OpTest(unittest.TestCase):
                         name_temp,
                         is_input,
                         if_return_inputs_grad_dict,
+                        self.is_calc_ref,
                     )
                     var_dict[name].append(v)
                     if if_return_inputs_grad_dict:
@@ -1457,6 +1527,19 @@ class OpTest(unittest.TestCase):
                     dygraph_outs.keys(), target_name
                 ),
             )

+        def find_imperative_expect(target_name, dygraph_outs, place):
+            for name in dygraph_outs:
+                if name == target_name:
+                    return dygraph_outs[name][0]
+                var_list = dygraph_outs[name]
+                for i, var in enumerate(var_list):
+                    if var.name == target_name:
+                        return dygraph_outs[name][i]
+            self.assertTrue(
+                False,
+                "Found failed {} {}".format(dygraph_outs.keys(), target_name),
+            )
+
         def find_actual(target_name, fetch_list):
             found = [
                 i
@@ -1468,6 +1551,17 @@ class OpTest(unittest.TestCase):
             )
             return found[0]

+        def find_expect(target_name, fetch_list):
+            found = [
+                i
+                for i, var_name in enumerate(fetch_list)
+                if var_name == target_name
+            ]
+            self.assertTrue(
+                len(found) == 1, "Found {} {}".format(len(found), target_name)
+            )
+            return found[0]
+
         class Checker:
             """base class for check with self.outputs.
             currently don't support check between checkers.
@@ -1505,6 +1599,10 @@ class OpTest(unittest.TestCase):
                 """return: (actual_tensor(var_base), actual_numpy)"""
                 raise NotImplementedError("base class, not implement!")

+            def find_expect_value(self, name):
+                """return: (expect_tensor(var_base), actual_numpy)"""
+                raise NotImplementedError("base class, not implement!")
+
             def _compare_numpy(self, name, actual_np, expect_np):
                 self.op_test.assertTrue(
                     np.allclose(
@@ -1528,7 +1626,13 @@ class OpTest(unittest.TestCase):

             def compare_single_output_with_expect(self, name, expect):
                 actual, actual_np = self.find_actual_value(name)
-                expect_np = expect[0] if isinstance(expect, tuple) else expect
+                # expect_np = expect[0] if isinstance(expect, tuple) else expect
+                if self.op_test.is_fp16_compared_with_fp32():
+                    expect, expect_np = self.find_expect_value(name)
+                else:
+                    expect_np = (
+                        expect[0] if isinstance(expect, tuple) else expect
+                    )
                 actual_np, expect_np = self.convert_uint16_to_float_ifneed(
                     actual_np, expect_np
                 )
@@ -1580,6 +1684,14 @@ class OpTest(unittest.TestCase):
                 )
                 self.outputs = outs
                 self.fetch_list = fetch_list
+                if self.op_test.is_fp16_compared_with_fp32():
+                    self.op_test.enable_cal_ref_output()
+                    ref_outs, ref_fetch_list = self.op_test._calc_output(
+                        place, no_check_set=no_check_set
+                    )
+                    self.op_test.disable_cal_ref_output()
+                    self.ref_outputs = ref_outs
+                    self.ref_fetch_list = ref_fetch_list

             def find_actual_value(self, name):
                 idx = find_actual(name, self.fetch_list)
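With this change the static checker runs the program twice whenever is_fp16_compared_with_fp32() holds: once as-is for the actual fp16 outputs, and once with the reference flag enabled to obtain fp32 expectations, which find_expect_value then reads from ref_outputs/ref_fetch_list. A condensed, self-contained illustration of that two-pass comparison (run_op and the exp kernel are stand-ins, not the operator under test):

import numpy as np

def run_op(x, use_fp32_ref=False):
    # Stand-in operator: the reference pass recomputes with fp32 inputs.
    x = x.astype(np.float32) if use_fp32_ref else x
    return np.exp(x)

x = np.linspace(-1.0, 1.0, 8).astype(np.float16)
actual = run_op(x)                      # fp16 pass (the normal run)
expect = run_op(x, use_fp32_ref=True)   # fp32 reference pass
assert np.allclose(actual.astype(np.float32), expect, rtol=1.0e-3)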
@@ -1587,6 +1699,12 @@ class OpTest(unittest.TestCase):
                 actual_t = np.array(actual)
                 return actual, actual_t

+            def find_expect_value(self, name):
+                idx = find_expect(name, self.ref_fetch_list)
+                expect = self.ref_outputs[idx]
+                expect_t = np.array(expect)
+                return expect, expect_t
+
             def convert_uint16_to_float_ifneed(self, actual_np, expect_np):
                 """
                 judge whether convert current output and expect to uint16.
@@ -1598,6 +1716,8 @@ class OpTest(unittest.TestCase):
                 ]:
                     actual_np = convert_uint16_to_float(actual_np)
                     self.rtol = 1.0e-2
+                elif actual_np.dtype == np.float16:
+                    self.rtol = 1.0e-3
                 else:
                     self.rtol = 1.0e-5
                 if (
@@ -1634,6 +1754,20 @@ class OpTest(unittest.TestCase):
                 )
                 self.outputs = dygraph_outs
+                if self.op_test.is_fp16_compared_with_fp32():
+                    self.op_test.enable_cal_ref_output()
+                    self.is_python_api_test = True
+                    ref_dygraph_outs = self.op_test._calc_python_api_output(
+                        place
+                    )
+                    if ref_dygraph_outs is None:
+                        self.is_python_api_test = False
+                        ref_dygraph_outs = self.op_test._calc_dygraph_output(
+                            place, no_check_set=no_check_set
+                        )
+                    self.ref_outputs = ref_dygraph_outs
+                    self.op_test.disable_cal_ref_output()

             def _compare_numpy(self, name, actual_np, expect_np):
                 if (
                     functools.reduce(
                         lambda x, y: x * y, actual_np.shape, 1
                     )
@@ -1665,6 +1799,8 @@ class OpTest(unittest.TestCase):
                     np.float64,
                 ]:
                     self.rtol = 1.0e-2
+                elif actual_np.dtype == np.float16:
+                    self.rtol = 1.0e-3
                 else:
                     self.rtol = 1.0e-5
                 if self.op_test.is_bfloat16_op():
@@ -1684,6 +1820,16 @@ class OpTest(unittest.TestCase):
                     )
                 return imperative_actual, imperative_actual_t

+            def find_expect_value(self, name):
+                with fluid.dygraph.base.guard(place=place):
+                    imperative_expect = find_imperative_expect(
+                        name, self.ref_outputs, place
+                    )
+                    imperative_expect_t = np.array(
+                        imperative_expect.value().get_tensor()
+                    )
+                return imperative_expect, imperative_expect_t
+
             def _compare_list(self, name, actual, expect):
                 """if expect is a tuple, we need to compare list."""
                 with fluid.dygraph.base.guard(place=place):
python/paddle/fluid/tests/unittests/op_test.py
@@ -465,6 +465,7 @@ class OpTest(unittest.TestCase):
         # Make sure this function is called after calling infer_dtype_from_inputs_outputs.
         return (
             self.dtype == np.float16
             or self.dtype == "float16"
             or (
                 hasattr(self, 'output_dtype')
                 and self.output_dtype == np.float16
@@ -1875,7 +1876,7 @@ class OpTest(unittest.TestCase):
                 with _test_eager_guard():
                     return super().find_actual_value(name)

-            def find_expect_valur(self, name):
+            def find_expect_value(self, name):
                 with _test_eager_guard():
                     return super().find_expect_value(name)