Commit f21d7957 (unverified)

Author: TTerror, Feb 16, 2022 · Committed via GitHub, Feb 16, 2022
Parent: 6eb95caf
Repository: 机器未来 / Paddle (forked from PaddlePaddle / Paddle)

    refactor huber_loss/argsor unittests for kunlun, *test=kunlun (#39527)

Showing 3 changed files with 133 additions and 267 deletions (+133 −267):

    python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py      +3   −3
    python/paddle/fluid/tests/unittests/xpu/test_argsort_op_xpu.py      +67  −208
    python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py   +63  −56
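Both test files in this commit are rewritten around the XPUOpTestWrapper, create_test_class and get_xpu_op_support_types helpers from get_test_cover_info.py, so one test body is instantiated once per dtype supported by the KunLun (XPU) kernel instead of hand-writing a subclass per dtype and attribute combination. The snippet below is only a minimal sketch of that kind of dtype-parameterized test generation; it is not Paddle's actual create_test_class implementation, and the helper name make_dtype_class is invented for illustration.

# Illustration only: dtype-parameterized test-class generation, NOT Paddle's
# actual create_test_class implementation.
import unittest


class _BaseSortTest(unittest.TestCase):
    in_type = None  # filled in per generated class

    def test_has_dtype(self):
        self.assertIsNotNone(self.in_type)


def make_dtype_class(scope, base, dtype):
    # Build "<Base>_<dtype>" with type() and register it in the given scope so
    # unittest discovery picks it up -- roughly what the diff relies on when it
    # calls create_test_class(globals(), <wrapper>, stype).
    name = base.__name__ + '_' + dtype
    scope[name] = type(name, (base, ), {'in_type': dtype})


for stype in ['float32', 'int64']:  # stand-in for get_xpu_op_support_types(...)
    make_dtype_class(globals(), _BaseSortTest, stype)

if __name__ == '__main__':
    unittest.main()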
python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py

@@ -83,8 +83,8 @@ type_dict_str_to_numpy = {
 }
 
 xpu_test_op_white_list = []
-xpu_test_type_white_list = []
-xpu_test_op_type_white_list = ['float64']
+xpu_test_type_white_list = ['float64']
+xpu_test_op_type_white_list = []
 xpu_test_device_op_white_list = []
 xpu_test_device_op_type_white_list = []

@@ -186,7 +186,7 @@ def get_xpu_op_support_types(op_name, dev_id=0):
                 paddle.bfloat16])
         else:
             support_type_str_list.append(type_dict_paddle_to_str[stype])
-    type_white_list = get_op_type_white_list()
+    type_white_list = get_type_white_list()
     return [
         stype for stype in support_type_str_list
         if stype not in type_white_list
     ]
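The net effect of the get_test_cover_info.py change above is that 'float64' is now excluded for every XPU op through the plain type white list, and get_xpu_op_support_types filters its result with get_type_white_list(). A toy illustration of that final filtering step follows; the support list values are made up, only the filtering expression mirrors the diff.

# Made-up data; the list comprehension is the same filtering done at the end of
# get_xpu_op_support_types after this commit.
support_type_str_list = ['float32', 'float64', 'int64']
type_white_list = ['float64']  # what get_type_white_list() would now return

print([stype for stype in support_type_str_list if stype not in type_white_list])
# -> ['float32', 'int64']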
python/paddle/fluid/tests/unittests/xpu/test_argsort_op_xpu.py

@@ -18,220 +18,79 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
+import paddle
 from op_test import OpTest
 from op_test_xpu import XPUOpTest
-import paddle
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid import ParamAttr
-from paddle.fluid.framework import Program, grad_var_name
-from paddle.fluid.executor import Executor
-from paddle.fluid.backward import append_backward
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
 paddle.enable_static()
 
-class TestArgsortOp(XPUOpTest):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "argsort"
-        self.place = paddle.XPUPlace(0)
-        self.init_dtype()
-        self.init_inputshape()
-        self.init_axis()
-        self.init_direction()
-        self.x = np.random.random(self.input_shape).astype(self.dtype)
-        self.inputs = {"X": self.x}
-        self.attrs = {"axis": self.axis, "descending": self.descending}
-        self.get_output()
-        self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
-
-    def get_output(self):
-        if self.descending:
-            self.indices = np.flip(
-                np.argsort(self.x, kind='heapsort', axis=self.axis), self.axis)
-            self.sorted_x = np.flip(
-                np.sort(self.x, kind='heapsort', axis=self.axis), self.axis)
-        else:
-            self.indices = np.argsort(self.x, kind='heapsort', axis=self.axis)
-            self.sorted_x = np.sort(self.x, kind='heapsort', axis=self.axis)
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-        self.__class__.no_need_check_grad = True
-
-    def init_inputshape(self):
-        self.input_shape = (2, 2, 2, 3, 3)
-
-    def init_dtype(self):
-        self.dtype = 'float32'
-
-    def init_axis(self):
-        self.axis = -1
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def init_direction(self):
-        self.descending = False
-
-class TestArgsortOpAxis0XPU(TestArgsortOp):
-    def init_axis(self):
-        self.axis = 0
-
-class TestArgsortOpAxis1XPU(TestArgsortOp):
-    def init_axis(self):
-        self.axis = 1
-
-class TestArgsortOpAxis2XPU(TestArgsortOp):
-    def init_axis(self):
-        self.axis = 2
-
-class TestArgsortOpAxisNeg1XPU(TestArgsortOp):
-    def init_axis(self):
-        self.axis = -1
-
-class TestArgsortOpAxisNeg2XPU(TestArgsortOp):
-    def init_axis(self):
-        self.axis = -2
-
-class TestArgsortOpDescendingAxisXPU(TestArgsortOp):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxis0XPU(TestArgsortOpAxis0XPU):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxis1XPU(TestArgsortOpAxis1XPU):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxis2XPU(TestArgsortOpAxis2XPU):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxisNeg1XPU(TestArgsortOpAxisNeg1XPU):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxisNeg2XPU(TestArgsortOpAxisNeg2XPU):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpAxis0XPUINT64(TestArgsortOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "argsort"
-        self.place = paddle.XPUPlace(0)
-        self.init_dtype()
-        self.init_inputshape()
-        self.init_axis()
-        self.init_direction()
-        self.x = np.random.randint(
-            low=-1000, high=1000, size=self.input_shape).astype(self.dtype)
-        self.inputs = {"X": self.x}
-        self.attrs = {"axis": self.axis, "descending": self.descending}
-        self.get_output()
-        self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
-
-    def init_axis(self):
-        self.axis = 0
-
-    def init_dtype(self):
-        self.dtype = 'int64'
-
-class TestArgsortOpAxis1XPUINT64(TestArgsortOpAxis0XPUINT64):
-    def init_axis(self):
-        self.axis = 1
-
-class TestArgsortOpAxis2XPUINT64(TestArgsortOpAxis0XPUINT64):
-    def init_axis(self):
-        self.axis = 2
-
-class TestArgsortOpAxisNeg1XPUINT64(TestArgsortOpAxis0XPUINT64):
-    def init_axis(self):
-        self.axis = -1
-
-class TestArgsortOpAxisNeg2XPUINT64(TestArgsortOpAxis0XPUINT64):
-    def init_axis(self):
-        self.axis = -2
-
-class TestArgsortOpDescendingAxisXPUINT64(TestArgsortOpAxis0XPUINT64):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxis0XPUINT64(TestArgsortOpAxis0XPUINT64):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxis1XPUINT64(TestArgsortOpAxis1XPUINT64):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxis2XPUINT64(TestArgsortOpAxis2XPUINT64):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxisNeg1XPUINT64(TestArgsortOpAxisNeg1XPUINT64):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpDescendingAxisNeg2XPUINT64(TestArgsortOpAxisNeg2XPUINT64):
-    def init_direction(self):
-        self.descending = True
-
-class TestArgsortOpAxis0XPUINT(TestArgsortOp):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = "argsort"
-        self.place = paddle.XPUPlace(0)
-        self.init_dtype()
-        self.init_inputshape()
-        self.init_axis()
-        self.init_direction()
-        self.x = np.random.randint(
-            low=-1000, high=1000, size=self.input_shape).astype(self.dtype)
-        self.inputs = {"X": self.x}
-        self.attrs = {"axis": self.axis, "descending": self.descending}
-        self.get_output()
-        self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
-
-    def init_axis(self):
-        self.axis = 0
-
-    def init_dtype(self):
-        self.dtype = 'int'
+class XPUTestArgsortOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'argsort'
+        self.use_dynamic_create_class = True
+
+    def dynamic_create_class(self):
+        base_class = self.TestArgsortOp
+        classes = []
+        for descending in [True, False]:
+            for axis in [0, 1, 2, -1, -2]:
+                class_name = 'XPUTestArgsortOp_axis_' + str(axis) + '_' + str(
+                    descending)
+                attr_dict = {'init_axis': axis, 'init_descending': descending}
+                classes.append([class_name, attr_dict])
+        return base_class, classes
+
+    class TestArgsortOp(XPUOpTest):
+        def setUp(self):
+            self.set_xpu()
+            self.op_type = "argsort"
+            self.place = paddle.XPUPlace(0)
+            self.dtype = self.in_type
+            self.input_shape = (2, 2, 2, 3, 3)
+            self.axis = -1 if not hasattr(self,
+                                          'init_axis') else self.init_axis
+            self.descending = False if not hasattr(
+                self, 'init_descending') else self.init_descending
+
+            if self.dtype == np.float32:
+                self.x = np.random.random(self.input_shape).astype(self.dtype)
+            else:
+                self.x = np.random.randint(
+                    low=-1000, high=1000,
+                    size=self.input_shape).astype(self.dtype)
+            self.inputs = {"X": self.x}
+            self.attrs = {"axis": self.axis, "descending": self.descending}
+            self.get_output()
+            self.outputs = {"Out": self.sorted_x, "Indices": self.indices}
+
+        def get_output(self):
+            if self.descending:
+                self.indices = np.flip(
+                    np.argsort(
+                        self.x, kind='heapsort', axis=self.axis), self.axis)
+                self.sorted_x = np.flip(
+                    np.sort(
+                        self.x, kind='heapsort', axis=self.axis), self.axis)
+            else:
+                self.indices = np.argsort(
+                    self.x, kind='heapsort', axis=self.axis)
+                self.sorted_x = np.sort(
+                    self.x, kind='heapsort', axis=self.axis)
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+support_types = get_xpu_op_support_types('argsort')
+for stype in support_types:
+    create_test_class(globals(), XPUTestArgsortOp, stype)
 
 if __name__ == '__main__':
     unittest.main()
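The new XPUTestArgsortOp sets use_dynamic_create_class = True and has dynamic_create_class return a base class plus [class_name, attr_dict] pairs; presumably the wrapper framework in xpu.get_test_cover_info materializes one subclass per pair, and the nested TestArgsortOp.setUp then picks up init_axis / init_descending via hasattr(). The sketch below shows how such pairs could be turned into classes with type(); it is a hypothetical illustration, not the framework's actual code, and the helper name materialize is invented.

# Hypothetical sketch of turning (base_class, [[name, attr_dict], ...]) into
# concrete classes; the real logic lives in xpu.get_test_cover_info and is not
# part of this diff.
def materialize(scope, base_class, classes):
    for class_name, attr_dict in classes:
        # Each attr_dict entry (e.g. init_axis / init_descending) becomes a
        # class attribute that setUp can read with hasattr().
        scope[class_name] = type(class_name, (base_class, ), dict(attr_dict))


class _Base:
    pass


materialize(globals(), _Base,
            [['Case_axis_0_True', {'init_axis': 0, 'init_descending': True}]])
case = globals()['Case_axis_0_True']
print(case.init_axis, case.init_descending)  # -> 0 True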
python/paddle/fluid/tests/unittests/xpu/test_huber_loss_op_xpu.py

@@ -18,11 +18,13 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("..")
-from op_test import OpTest
-from op_test_xpu import XPUOpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
+from op_test import OpTest
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
 paddle.enable_static()

@@ -35,76 +37,81 @@ def huber_loss_forward(val, delta):
     return delta * (abs_val - 0.5 * delta)
 
-class TestHuberLossOp(XPUOpTest):
-    def setUp(self):
-        self.set_xpu()
-        self.op_type = 'huber_loss'
-        self.place = paddle.XPUPlace(0)
-        self.init_dtype()
-        self.set_inputs()
-        self.set_attrs()
-        self.set_outputs()
-
-    def set_inputs(self):
-        shape = self.set_shape()
-        x = np.random.uniform(0, 1., shape).astype(self.dtype)
-        y = np.random.uniform(0, 1., shape).astype(self.dtype)
-        self.inputs = {
-            'X': OpTest.np_dtype_to_fluid_dtype(x),
-            'Y': OpTest.np_dtype_to_fluid_dtype(y)
-        }
-
-    def set_attrs(self):
-        self.attrs = {'delta': 0.5}
-
-    def set_outputs(self):
-        delta = self.attrs['delta']
-        shape = self.set_shape()
-        residual = self.inputs['Y'] - self.inputs['X']
-        loss = np.vectorize(huber_loss_forward)(residual,
-                                                delta).astype(self.dtype)
-        self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
-
-    def set_shape(self):
-        return (100, 1)
-
-    def set_xpu(self):
-        self.__class__.use_xpu = True
-
-    def init_dtype(self):
-        self.dtype = np.float32
-
-    def test_check_output(self):
-        self.check_output_with_place(self.place)
-
-    def test_check_grad_normal(self):
-        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
-
-    def test_check_grad_ingore_x(self):
-        self.check_grad_with_place(
-            self.place, ['Y'], 'Out', no_grad_set=set("residual"))
-
-    def test_check_grad_ingore_y(self):
-        self.check_grad_with_place(
-            self.place, ['X'], 'Out', no_grad_set=set('residual'))
-
-def TestHuberLossOp1(TestHuberLossOp):
-    def set_shape(self):
-        return (64)
-
-def TestHuberLossOp2(TestHuberLossOp):
-    def set_shape(self):
-        return (6, 6)
-
-def TestHuberLossOp3(TestHuberLossOp):
-    def set_shape(self):
-        return (6, 6, 1)
+class XPUTestHuberLossOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'huber_loss'
+        self.use_dynamic_create_class = False
+
+    class TestHuberLossOp(XPUOpTest):
+        def setUp(self):
+            self.set_xpu()
+            self.op_type = 'huber_loss'
+            self.place = paddle.XPUPlace(0)
+
+            self.init_dtype()
+            self.set_inputs()
+            self.set_attrs()
+            self.set_outputs()
+
+        def set_inputs(self):
+            shape = self.set_shape()
+            x = np.random.uniform(0, 1., shape).astype(self.dtype)
+            y = np.random.uniform(0, 1., shape).astype(self.dtype)
+            self.inputs = {
+                'X': OpTest.np_dtype_to_fluid_dtype(x),
+                'Y': OpTest.np_dtype_to_fluid_dtype(y)
+            }
+
+        def set_attrs(self):
+            self.attrs = {'delta': 0.5}
+
+        def set_outputs(self):
+            delta = self.attrs['delta']
+            shape = self.set_shape()
+            residual = self.inputs['Y'] - self.inputs['X']
+            loss = np.vectorize(huber_loss_forward)(residual,
+                                                    delta).astype(self.dtype)
+            self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
+
+        def set_shape(self):
+            return (100, 1)
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad_normal(self):
+            self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
+
+        def test_check_grad_ingore_x(self):
+            self.check_grad_with_place(
+                self.place, ['Y'], 'Out', no_grad_set=set("residual"))
+
+        def test_check_grad_ingore_y(self):
+            self.check_grad_with_place(
+                self.place, ['X'], 'Out', no_grad_set=set('residual'))
+
+    class TestHuberLossOp1(TestHuberLossOp):
+        def set_shape(self):
+            return (640)
+
+    class TestHuberLossOp2(TestHuberLossOp):
+        def set_shape(self):
+            return (10, 10)
+
+    class TestHuberLossOp3(TestHuberLossOp):
+        def set_shape(self):
+            return (10, 10, 1)
+
+support_types = get_xpu_op_support_types('huber_loss')
+for stype in support_types:
+    create_test_class(globals(), XPUTestHuberLossOp, stype)
 
 if __name__ == '__main__':
     unittest.main()
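The huber_loss tests build their expected output by vectorizing huber_loss_forward over the residual Y − X. Only the linear branch of that reference, return delta * (abs_val - 0.5 * delta), is visible in the hunk context above; the standard Huber loss also has a quadratic branch, 0.5 * val^2 for |val| <= delta. The snippet below is a small reference check written against that textbook definition, so its quadratic branch is an assumption rather than a line quoted from the file.

# Reference check against the standard Huber loss; the quadratic branch is the
# textbook formula, only the linear branch is quoted from the diff context.
import numpy as np


def huber_loss_forward(val, delta):
    abs_val = abs(val)
    if abs_val <= delta:
        return 0.5 * val * val
    return delta * (abs_val - 0.5 * delta)


residual = np.array([0.1, -0.2, 0.9])
delta = 0.5
print(np.vectorize(huber_loss_forward)(residual, delta))
# small residuals fall on the quadratic branch, large ones grow linearly in |residual|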