BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 35ed11f3 (unverified)
Authored May 06, 2022 by Leo Chen; committed by GitHub on May 06, 2022
Parent: 58f40144

[cherry-pick] fix wrong place in ut (#42488)

* fix wrong place
* skip bf16 test if not supported (#42503)
Showing 1 changed file with 14 additions and 15 deletions:

python/paddle/fluid/tests/unittests/test_imperative_auto_mixed_precision.py (+14, -15)
@@ -919,7 +919,7 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase):
 
         # load_inference_model
         paddle.enable_static()
-        exe = paddle.static.Executor(paddle.CPUPlace())
+        exe = paddle.static.Executor()
         [inference_program, feed_target_names,
          fetch_targets] = (paddle.static.load_inference_model(path, exe))
         tensor_img = x
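The one-line change above drops the hard-coded CPU place: constructed without an argument, the static-graph Executor runs on Paddle's default device (GPU 0 on a CUDA build, otherwise CPU), which is what a pure-fp16 inference test needs; pinning paddle.CPUPlace() is the "wrong place" the commit title refers to. A minimal sketch of the two constructions, assuming a stock Paddle install:

import paddle

paddle.enable_static()

# New form from the hunk: no explicit place, so the Executor uses the
# default device for this build (GPU 0 when compiled with CUDA, else CPU).
exe = paddle.static.Executor()

# Old form being removed: forces CPU even on a GPU build.
# exe = paddle.static.Executor(paddle.CPUPlace())

paddle.disable_static()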
@@ -927,8 +927,8 @@ class TestPureFp16InferenceSaveLoad(unittest.TestCase):
                           feed={feed_target_names[0]: tensor_img},
                           fetch_list=fetch_targets)
         print("pred.numpy()", pred.numpy())
-        print("results", results)
-        self.assertTrue(np.allclose(pred.numpy(), results, atol=1.e-5))
+        print("result", results[0])
+        self.assertTrue(np.array_equal(pred.numpy(), results[0]))
         paddle.disable_static()
 
     def test_inference_save_load(self):
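The second hunk tightens the check on the fetched results. Executor.run() returns a list of arrays, one per fetch target, so the old assertion compared pred.numpy() against the list wrapper rather than the array itself; the new one indexes results[0] and demands exact equality instead of an atol tolerance. A toy illustration with hypothetical values (not the test's real tensors):

import numpy as np

pred = np.array([[0.25, 0.75]], dtype=np.float32)  # stand-in for pred.numpy()
results = [pred.copy()]                            # run(..., fetch_list=...) yields a list

# New check: compare against the first fetched array, element for element.
assert np.array_equal(pred, results[0])

# Old check: tolerant comparison against the whole list; NumPy broadcasts
# the single-element list so it passed, but it obscured the indexing.
assert np.allclose(pred, results, atol=1.e-5)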
@@ -1254,18 +1254,17 @@ class TestBf16(unittest.TestCase):
 
     def test_bf16(self):
         def func_isinstance():
-            if fluid.core.is_compiled_with_cuda():
-                cudnn_version = paddle.device.get_cudnn_version()
-                if cudnn_version is not None and cudnn_version >= 8100:
-                    out_fp32 = self.train(enable_amp=False)
-                    out_bf16_O1 = self.train(enable_amp=True, amp_level='O1')
-                    out_bf16_O2 = self.train(enable_amp=True, amp_level='O2')
-                    self.assertTrue(
-                        np.allclose(
-                            out_fp32, out_bf16_O1, rtol=1.e-3, atol=1.e-1))
-                    self.assertTrue(
-                        np.allclose(
-                            out_fp32, out_bf16_O2, rtol=1.e-3, atol=1.e-1))
+            if fluid.core.is_compiled_with_cuda(
+            ) and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)):
+                out_fp32 = self.train(enable_amp=False)
+                out_bf16_O1 = self.train(enable_amp=True, amp_level='O1')
+                out_bf16_O2 = self.train(enable_amp=True, amp_level='O2')
+                self.assertTrue(
+                    np.allclose(
+                        out_fp32, out_bf16_O1, rtol=1.e-3, atol=1.e-1))
+                self.assertTrue(
+                    np.allclose(
+                        out_fp32, out_bf16_O2, rtol=1.e-3, atol=1.e-1))
 
         with _test_eager_guard():
             func_isinstance()
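The third hunk replaces the indirect cuDNN-version probe (>= 8100) with a direct capability query, fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)), so the bf16 test is skipped on any device that lacks bfloat16 kernels regardless of what cuDNN version is reported. A sketch of the same guard expressed as a unittest skip decorator; this is a hypothetical variant for illustration, not how the test file itself wires it up:

import unittest

import paddle
import paddle.fluid as fluid


def bf16_supported():
    # Same guard as the new code: short-circuit so CUDAPlace(0) is only
    # constructed on a CUDA build, then ask the runtime whether device 0
    # actually has usable bfloat16 kernels.
    return (fluid.core.is_compiled_with_cuda()
            and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)))


class TestBf16Guard(unittest.TestCase):
    # Hypothetical usage: as a decorator the guard makes unsupported
    # machines report "skipped" rather than silently passing.
    @unittest.skipIf(not bf16_supported(),
                     "bfloat16 not supported on this device")
    def test_runs_only_where_bf16_works(self):
        pass  # real assertions would go here


if __name__ == '__main__':
    unittest.main()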