Unverified commit 9a1fdad3
Authored on Jul 01, 2022 by ykkk2333; committed by GitHub on Jul 01, 2022
update new unittests of flatten ops and layernorm, *test=kunlun (#43895)
Parent: 37f2151f

Showing 4 changed files with 366 additions and 334 deletions (+366 −334)
python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py  +53 −40
python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py  +210 −206
python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py  +48 −35
python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py  +55 −53
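All four files adopt the same structure: the concrete unit tests are nested inside an XPUOpTestWrapper subclass, and one test class per supported XPU dtype is generated at import time with create_test_class(globals(), <wrapper>, stype) over get_xpu_op_support_types(). Below is a minimal, self-contained sketch of that kind of dtype-parameterized registration; the helper name simplified_create_test_class and the hard-coded dtype list are assumptions for illustration only, not Paddle's actual xpu.get_test_cover_info API.

import unittest

import numpy as np


class FlattenCaseBase(object):
    # Mirrors the nested XPU test classes: the dtype comes from `in_type`,
    # which the generated subclasses override.
    in_type = "float32"

    def test_flatten_matches_reshape(self):
        x = np.random.random((3, 2, 4, 5)).astype(self.in_type)
        # flatten with axis=1 keeps dim 0 and merges the remaining dims: (3, 40)
        out = x.reshape(3, -1)
        self.assertEqual(out.shape, (3, 40))
        self.assertEqual(out.dtype, np.dtype(self.in_type))


def simplified_create_test_class(scope, base, dtype):
    # Hypothetical stand-in for create_test_class from xpu.get_test_cover_info:
    # build one unittest.TestCase subclass per dtype and register it in `scope`
    # so unittest discovery picks it up.
    name = "{}_{}".format(base.__name__, dtype)
    scope[name] = type(name, (base, unittest.TestCase), {"in_type": dtype})


# Assumed dtype list for the sketch; the real tests query get_xpu_op_support_types().
for stype in ["float32", "int64"]:
    simplified_create_test_class(globals(), FlattenCaseBase, stype)

if __name__ == "__main__":
    unittest.main()

Running this registers FlattenCaseBase_float32 and FlattenCaseBase_int64 with unittest, much as each file below registers one class per dtype returned by get_xpu_op_support_types().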
python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py
@@ -23,67 +23,80 @@ import paddle

This diff wraps the module-level TestFlatten2Op tests (hard-coded "float32" dtype) in an XPUTestFlatten2Op(XPUOpTestWrapper) class, takes the dtype from self.in_type, and registers one test class per supported XPU dtype. The updated module reads:

import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestFlatten2Op(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'flatten2'
        self.use_dynamic_create_class = False

    class TestFlatten2Op(XPUOpTest):

        def setUp(self):
            self.set_xpu()
            self.op_type = "flatten2"
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                "XShape": np.random.random(self.in_shape).astype(self.dtype)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True

        def test_check_output(self):
            self.check_output_with_place(self.place, no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 4, 5)
            self.axis = 1
            self.new_shape = (3, 40)

        def init_attrs(self):
            self.attrs = {"axis": self.axis}

    class TestFlatten2OpWithCornerAxis(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.axis = 0
            self.new_shape = (1, 120)

    class TestFlatten2OpWithDefaultAxis(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (10, 2, 2, 3)
            self.new_shape = (10, 12)

        def init_attrs(self):
            self.attrs = {}

    class TestFlatten2OpSixDims(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.axis = 4
            self.new_shape = (36, 16)


support_types = get_xpu_op_support_types('flatten2')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlatten2Op, stype)

if __name__ == "__main__":
    unittest.main()
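For reference, the new_shape values in the flatten2 cases above follow from collapsing the dims before and after axis into two dims. A small sketch (the helper name is ours, for illustration) that reproduces the expected shapes used in these tests:

from functools import reduce
from operator import mul


def flatten2_new_shape(in_shape, axis):
    # flatten2 collapses dims [0, axis) into one dim and dims [axis, n) into
    # another; axis=0 yields a leading dim of 1, matching the (1, 120) case above.
    outer = reduce(mul, in_shape[:axis], 1)
    inner = reduce(mul, in_shape[axis:], 1)
    return (outer, inner)


# Shape expectations taken from the test cases above.
assert flatten2_new_shape((3, 2, 4, 5), 1) == (3, 40)
assert flatten2_new_shape((3, 2, 5, 4), 0) == (1, 120)
assert flatten2_new_shape((3, 2, 3, 2, 4, 4), 4) == (36, 16)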
python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py
@@ -17,7 +17,6 @@ from __future__ import print_function

The import block loses one duplicate import:

import sys
sys.path.append("..")
import numpy as np
import unittest

@@ -27,215 +26,214 @@ from op_test import OpTest

As in the flatten2 test, the module-level TestFlattenOp classes (default dtype np.float32) are nested inside an XPUTestFlattenOp(XPUOpTestWrapper) class, with the dtype taken from self.in_type and per-dtype classes registered at the bottom of the file:

from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestFlattenOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'flatten_contiguous_range'
        self.use_dynamic_create_class = False

    class TestFlattenOp(XPUOpTest):

        def setUp(self):
            self.set_xpu()
            self.op_type = "flatten_contiguous_range"
            self.place = paddle.XPUPlace(0)
            self.use_xpu = True
            self.use_mkldnn = False
            self.start_axis = 0
            self.stop_axis = -1
            self.dtype = self.in_type
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                "XShape": np.random.random(self.in_shape).astype(self.dtype)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True

        def test_check_output(self):
            self.check_output_with_place(self.place, no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = -1
            self.new_shape = (120)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis,
                'use_xpu': True,
            }

    class TestFlattenOp_1(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 1
            self.stop_axis = 2
            self.new_shape = (3, 10, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_2(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_3(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 2
            self.new_shape = (30, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_4(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = -2
            self.stop_axis = -1
            self.new_shape = (3, 2, 20)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_5(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 2
            self.stop_axis = 2
            self.new_shape = (3, 2, 5, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOpSixDims(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.start_axis = 3
            self.stop_axis = 5
            self.new_shape = (3, 2, 3, 32)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_Float32(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.float32

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_int32(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int32

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis,
                'use_xpu': True
            }

        def test_check_grad(self):
            pass

    class TestFlattenOp_int8(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int8

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

        def test_check_grad(self):
            pass

    class TestFlattenOp_int64(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int64

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

        def test_check_grad(self):
            pass


class TestFlatten2OpError(unittest.TestCase):

@@ -338,5 +336,11 @@ class TestFlattenPython(unittest.TestCase):

        self.assertTrue((2, 3, 16) == res_shape)


support_types = get_xpu_op_support_types('flatten_contiguous_range')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlattenOp, stype)

if __name__ == "__main__":
    unittest.main()
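The new_shape values in the flatten_contiguous_range cases follow from merging the dims between start_axis and stop_axis (inclusive, negative axes allowed) into a single dim. A small sketch (the helper name is ours, for illustration) reproducing the expectations above:

from functools import reduce
from operator import mul


def flatten_range_new_shape(in_shape, start_axis, stop_axis):
    # Merge dims [start_axis, stop_axis] into one dimension; keep the rest.
    n = len(in_shape)
    start = start_axis + n if start_axis < 0 else start_axis
    stop = stop_axis + n if stop_axis < 0 else stop_axis
    merged = reduce(mul, in_shape[start:stop + 1], 1)
    return in_shape[:start] + (merged,) + in_shape[stop + 1:]


# Shape expectations taken from the test cases above.
assert flatten_range_new_shape((3, 2, 5, 4), 1, 2) == (3, 10, 4)
assert flatten_range_new_shape((3, 2, 5, 4), -2, -1) == (3, 2, 20)
assert flatten_range_new_shape((3, 2, 3, 2, 4, 4), 3, 5) == (3, 2, 3, 32)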
python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py
@@ -23,61 +23,74 @@ import paddle

The flatten op test gets the same treatment: the module-level TestFlattenOp classes (hard-coded "float32" dtype) become an XPUTestFlattenOp(XPUOpTestWrapper) wrapper, the dtype comes from self.in_type, and classes are registered per supported dtype:

import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestFlattenOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'flatten'
        self.use_dynamic_create_class = False

    class TestFlattenOp(XPUOpTest):

        def setUp(self):
            self.op_type = "flatten"
            self.use_xpu = True
            self.place = paddle.XPUPlace(0)
            self.init_test_case()
            self.dtype = self.in_type
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 2, 10)
            self.axis = 1
            self.new_shape = (3, 40)

        def init_attrs(self):
            self.attrs = {"axis": self.axis}

    class TestFlattenOp1(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 2, 10)
            self.axis = 0
            self.new_shape = (1, 120)

    class TestFlattenOpWithDefaultAxis(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (10, 2, 2, 3)
            self.new_shape = (10, 12)

        def init_attrs(self):
            self.attrs = {}

    class TestFlattenOpSixDims(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.axis = 4
            self.new_shape = (36, 16)


support_types = get_xpu_op_support_types('flatten')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlattenOp, stype)

if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py
@@ -20,7 +20,9 @@ from functools import reduce

sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
from operator import mul
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()

@@ -42,77 +44,77 @@ def ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1):

The layer_norm test drops the per-class @unittest.skipIf decorators and the TestXPULayerNormOp(OpTest) base with a fixed np.float32 dtype; the classes are nested in an XPUTestLayerNormOp(XPUOpTestWrapper) wrapper, take their dtype from self.in_type, and are registered for every supported type:

    return y, mean, variance


class XPUTestLayerNormOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'layer_norm'
        self.use_dynamic_create_class = False

    class TestXPULayerNormOp(XPUOpTest):

        def setUp(self):
            self.op_type = "layer_norm"
            self.dtype = self.in_type
            self.shape = [2, 3, 4, 5]
            self.epsilon = 1e-05
            self.begin_norm_axis = 1
            self.set_attrs()

            right = reduce(mul,
                           self.shape[self.begin_norm_axis:len(self.shape)], 1)
            np.random.seed(10)
            x_np = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            scale_np = np.random.uniform(0.1, 1, [right]).astype(self.dtype)
            bias_np = np.random.uniform(0.1, 1, [right]).astype(self.dtype)
            ref_y_np, ref_mean_np, ref_variance_np = ref_layer_norm(
                x_np, scale_np, bias_np, self.epsilon, self.begin_norm_axis)

            self.inputs = {'X': x_np, 'Scale': scale_np, 'Bias': bias_np}
            self.outputs = {
                'Y': ref_y_np,
                'Mean': ref_mean_np,
                'Variance': ref_variance_np
            }
            self.attrs = {
                'begin_norm_axis': self.begin_norm_axis,
                'use_xpu': True
            }

        def set_attrs(self):
            pass

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)

        def test_check_grad(self):
            self.check_grad_with_place(paddle.XPUPlace(0), ['X'],
                                       'Y',
                                       max_relative_error=0.02)

    class TestXPULayerNormOpAxis2(TestXPULayerNormOp):

        def set_attrs(self):
            self.begin_norm_axis = 2

    class TestXPULayerNormOpAxis3(TestXPULayerNormOp):

        def set_attrs(self):
            self.begin_norm_axis = 3

    class TestXPULayerNormOp2D(TestXPULayerNormOp):

        def set_attrs(self):
            self.shape = [10, 12]

    class TestXPULayerNormOp3D(TestXPULayerNormOp):

        def set_attrs(self):
            self.shape = [4, 5, 6]


support_types = get_xpu_op_support_types('layer_norm')
for stype in support_types:
    create_test_class(globals(), XPUTestLayerNormOp, stype)

if __name__ == "__main__":
    unittest.main()
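The layer_norm test builds its expected outputs with the file's ref_layer_norm helper, whose body lies outside this diff. The sketch below shows a standard NumPy layer-norm reference using the same shape conventions (scale and bias sized to the product of the normalized dims, mean and variance per row up to begin_norm_axis); the function name and exact formulation are ours and may differ in detail from ref_layer_norm:

import numpy as np


def layer_norm_reference(x, scale, bias, epsilon=1e-05, begin_norm_axis=1):
    # Flatten dims from begin_norm_axis onward, normalize each row to zero
    # mean / unit variance, then apply the elementwise scale and bias.
    shape = x.shape
    left = int(np.prod(shape[:begin_norm_axis]))
    right = int(np.prod(shape[begin_norm_axis:]))
    x2d = x.reshape(left, right)
    mean = x2d.mean(axis=1)
    variance = x2d.var(axis=1)
    y = (x2d - mean[:, None]) / np.sqrt(variance[:, None] + epsilon)
    y = (y * scale[None, :] + bias[None, :]).reshape(shape)
    return y, mean, variance


# Same shapes as the test's default case: shape [2, 3, 4, 5], begin_norm_axis=1,
# so scale/bias have 3 * 4 * 5 = 60 elements.
np.random.seed(10)
x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(np.float32)
scale = np.random.uniform(0.1, 1, [60]).astype(np.float32)
bias = np.random.uniform(0.1, 1, [60]).astype(np.float32)
y, mean, var = layer_norm_reference(x, scale, bias)
print(y.shape, mean.shape, var.shape)  # (2, 3, 4, 5) (2,) (2,)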