机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit ae25ab56 (unverified)
Authored on Jul 27, 2022 by ykkk2333; committed via GitHub on Jul 27, 2022
xpu unittest grad compute supports more types, *test=kunlun (#44606)
Parent: 5be7a1ff
Showing 4 changed files with 84 additions and 11 deletions (+84 −11)
python/paddle/fluid/tests/unittests/op_test_xpu.py                               +81 −2
python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py                  +1 −3
python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py  +1 −3
python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py                   +1 −3
python/paddle/fluid/tests/unittests/op_test_xpu.py
@@ -32,13 +32,13 @@ import paddle.fluid.core as core
 from paddle.fluid.backward import append_backward
 from paddle.fluid.op import Operator
 from paddle.fluid.executor import Executor
-from paddle.fluid.framework import Program, OpProtoHolder, Variable
+from paddle.fluid.framework import Program, OpProtoHolder, Variable, convert_np_dtype_to_dtype_
 from testsuite import create_op, set_input, append_input_output, append_loss_ops
 from paddle.fluid import unique_name
 from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
 from white_list import op_threshold_white_list, no_grad_set_white_list
 from op_test import OpTest, _set_use_system_allocator, get_numeric_gradient
-from xpu.get_test_cover_info import is_empty_grad_op_type
+from xpu.get_test_cover_info import is_empty_grad_op_type, get_xpu_op_support_types, type_dict_str_to_numpy

 class XPUOpTest(OpTest):
@@ -66,6 +66,10 @@ class XPUOpTest(OpTest):
             place = paddle.XPUPlace(0)
             if core.is_float16_supported(place) == False:
                 return
+
+        if cls.dtype == np.float64:
+            return
+
         super().tearDownClass()

     def _get_places(self):
@@ -144,6 +148,14 @@ class XPUOpTest(OpTest):
             self._check_grad_helper()
             return

+        cast_grad_op_types = get_xpu_op_support_types('cast')
+        cast_grad_op_types_np = []
+        for ctype in cast_grad_op_types:
+            cast_grad_op_types_np.append(type_dict_str_to_numpy[ctype])
+
+        if (self.dtype not in cast_grad_op_types_np):
+            return
+
         if self.dtype == np.float64:
             return
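The guard added above converts the XPU 'cast' support list (type strings) into NumPy dtypes before checking whether the test dtype can be cast at all. A minimal standalone sketch of that membership check, using a stand-in mapping since the real type_dict_str_to_numpy lives in xpu.get_test_cover_info:

    import numpy as np

    # Stand-in for xpu.get_test_cover_info.type_dict_str_to_numpy (assumed subset).
    type_dict_str_to_numpy = {
        'float32': np.float32,
        'float16': np.float16,
        'int32': np.int32,
        'int64': np.int64,
    }

    def dtype_supported(support_type_strings, dtype):
        # Convert the framework's type strings to NumPy dtypes, then test
        # membership, mirroring the early return added to check_grad_with_place.
        supported = [type_dict_str_to_numpy[s] for s in support_type_strings]
        return dtype in supported

    print(dtype_supported(['float32', 'float16'], np.float16))  # True
    print(dtype_supported(['float32'], np.int64))                # False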
@@ -212,6 +224,11 @@ class XPUOpTest(OpTest):
             op_attrs["use_mkldnn"] = False
             use_onednn = True

+        mean_grad_op_types = get_xpu_op_support_types('mean')
+        mean_grad_op_types_np = []
+        for mtype in mean_grad_op_types:
+            mean_grad_op_types_np.append(type_dict_str_to_numpy[mtype])
+
         self.op = create_op(self.scope, self.op_type, op_inputs,
@@ -238,6 +255,68 @@ class XPUOpTest(OpTest):
         if not type(output_names) is list:
             output_names = [output_names]

+        if (self.dtype not in mean_grad_op_types_np):
+
+            prog = Program()
+            block = prog.global_block()
+            scope = core.Scope()
+            self._append_ops(block)
+
+            inputs = self._get_inputs(block)
+            outputs = self._get_outputs(block)
+            feed_dict = self.feed_var(inputs, place)
+
+            cast_inputs = list(map(block.var, output_names))
+            cast_outputs = block.create_var(dtype="float32",
+                                            shape=cast_inputs[0].shape)
+            cast_op = block.append_op(type="cast",
+                                      inputs={"X": cast_inputs},
+                                      outputs={"Out": cast_outputs},
+                                      attrs={
+                                          "in_dtype":
+                                          convert_np_dtype_to_dtype_(self.dtype),
+                                          "out_dtype": core.VarDesc.VarType.FP32
+                                      })
+            cast_op.desc.infer_var_type(block.desc)
+            cast_op.desc.infer_shape(block.desc)
+
+            output_names = [cast_outputs.name]
+
+            loss = append_loss_ops(block, output_names)
+            loss_names = [loss.name]
+            recast_inputs = list(map(block.var, loss_names))
+            recast_loss = block.create_var(dtype=self.dtype,
+                                           shape=recast_inputs[0].shape)
+
+            recast_op = block.append_op(type="cast",
+                                        inputs={"X": recast_inputs},
+                                        outputs={"Out": recast_loss},
+                                        attrs={
+                                            "in_dtype": core.VarDesc.VarType.FP32,
+                                            "out_dtype":
+                                            convert_np_dtype_to_dtype_(self.dtype)
+                                        })
+            recast_op.desc.infer_var_type(block.desc)
+            recast_op.desc.infer_shape(block.desc)
+
+            param_grad_list = append_backward(loss=recast_loss,
+                                              parameter_list=[input_to_check],
+                                              no_grad_set=no_grad_set)
+            fetch_list = [g for p, g in param_grad_list]
+
+            executor = fluid.Executor(place)
+            return list(
+                map(np.array,
+                    executor.run(prog,
+                                 feed_dict,
+                                 fetch_list,
+                                 scope=scope,
+                                 return_numpy=False)))
+
         analytic_grads = self._get_gradient(inputs_to_check, place,
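When the test dtype is not among the types supported by the XPU 'mean' grad kernel, the new branch builds the gradient program by hand: outputs are cast to float32, the mean loss and backward pass run in float32, and the result is cast back to the original dtype. A NumPy-only sketch of the same cast-compute-recast idea (the helper name is hypothetical and not part of the test framework):

    import numpy as np

    def grad_of_mean_via_float32(x):
        # Compute d(mean(x))/dx for a dtype whose backward kernel is unsupported,
        # by doing the arithmetic in float32 and casting the gradient back.
        x32 = x.astype(np.float32)                   # forward "cast" to float32
        grad32 = np.full_like(x32, 1.0 / x32.size)   # analytic gradient of mean()
        return grad32.astype(x.dtype)                # "recast" to the test dtype

    x = np.random.rand(2, 3).astype(np.float16)
    print(grad_of_mean_via_float32(x))               # 1/6 everywhere, in float16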
python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py
@@ -93,9 +93,7 @@ class XPUTestFlatten2Op(XPUOpTestWrapper):
 support_types = get_xpu_op_support_types('flatten2')
-support_types_for_grad = get_xpu_op_support_types('mean')
 for stype in support_types:
-    if stype in support_types_for_grad:
-        create_test_class(globals(), XPUTestFlatten2Op, stype)
+    create_test_class(globals(), XPUTestFlatten2Op, stype)

 if __name__ == "__main__":
python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py
@@ -337,9 +337,7 @@ class TestFlattenPython(unittest.TestCase):
 support_types = get_xpu_op_support_types('flatten_contiguous_range')
-support_types_for_grad = get_xpu_op_support_types('mean')
 for stype in support_types:
-    if stype in support_types_for_grad:
-        create_test_class(globals(), XPUTestFlattenOp, stype)
+    create_test_class(globals(), XPUTestFlattenOp, stype)

 if __name__ == "__main__":
python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py
@@ -87,9 +87,7 @@ class XPUTestFlattenOp(XPUOpTestWrapper):
 support_types = get_xpu_op_support_types('flatten')
-support_types_for_grad = get_xpu_op_support_types('mean')
 for stype in support_types:
-    if stype in support_types_for_grad:
-        create_test_class(globals(), XPUTestFlattenOp, stype)
+    create_test_class(globals(), XPUTestFlattenOp, stype)

 if __name__ == "__main__":