BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Unverified commit 9a1fdad3
Authored on Jul 01, 2022 by ykkk2333, committed via GitHub on Jul 01, 2022

update new unittests of flatten ops and layernorm, *test=kunlun (#43895)

Parent: 37f2151f
Showing 4 changed files with 366 additions and 334 deletions (+366 −334)
python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py  +53 −40
python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py  +210 −206
python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py  +48 −35
python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py  +55 −53
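All four files apply the same refactor: the test cases are grouped under an XPUOpTestWrapper subclass, read their dtype from self.in_type, and are registered once per supported dtype through get_xpu_op_support_types and create_test_class from xpu.get_test_cover_info. A rough sketch of what that per-dtype registration is assumed to do (the helper name _register_dtype_cases and the in_type mechanism shown below are inferred for illustration, not taken from this diff):

# Illustrative sketch only: an approximation of the registration that
# create_test_class is assumed to perform; the real helper lives in
# xpu/get_test_cover_info.py and _register_dtype_cases is a hypothetical name.
import unittest


def _register_dtype_cases(scope, wrapper_cls, dtype):
    """For every test case nested in wrapper_cls, publish a dtype-specific
    subclass into `scope` (normally globals() of the test module)."""
    for name in dir(wrapper_cls):
        member = getattr(wrapper_cls, name)
        if isinstance(member, type) and issubclass(member, unittest.TestCase):
            cls_name = "%s_%s" % (name, dtype)
            # The generated class carries its dtype in `in_type`, which the
            # test's setUp() reads via `self.dtype = self.in_type`.
            scope[cls_name] = type(cls_name, (member,), {"in_type": dtype})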
python/paddle/fluid/tests/unittests/xpu/test_flatten2_op_xpu.py
@@ -23,67 +23,80 @@ import paddle

import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestFlatten2Op(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'flatten2'
        self.use_dynamic_create_class = False

    class TestFlatten2Op(XPUOpTest):

        def setUp(self):
            self.set_xpu()
            self.op_type = "flatten2"
            self.dtype = self.in_type
            self.place = paddle.XPUPlace(0)
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                "XShape": np.random.random(self.in_shape).astype(self.dtype)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True

        def test_check_output(self):
            self.check_output_with_place(self.place, no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 4, 5)
            self.axis = 1
            self.new_shape = (3, 40)

        def init_attrs(self):
            self.attrs = {"axis": self.axis}

    class TestFlatten2OpWithCornerAxis(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.axis = 0
            self.new_shape = (1, 120)

    class TestFlatten2OpWithDefaultAxis(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (10, 2, 2, 3)
            self.new_shape = (10, 12)

        def init_attrs(self):
            self.attrs = {}

    class TestFlatten2OpSixDims(TestFlatten2Op):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.axis = 4
            self.new_shape = (36, 16)


support_types = get_xpu_op_support_types('flatten2')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlatten2Op, stype)

if __name__ == "__main__":
    unittest.main()
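The expected shapes in these cases follow the flatten2 rule that everything before `axis` collapses into the first output dimension and everything from `axis` on collapses into the second. A quick NumPy check of the values used above (helper name is illustrative):

# Sanity check of the new_shape values appearing in the test cases above.
import numpy as np


def flatten2_shape(in_shape, axis):
    # prod of an empty slice is 1, which covers the axis=0 corner case.
    return (int(np.prod(in_shape[:axis])), int(np.prod(in_shape[axis:])))


assert flatten2_shape((3, 2, 4, 5), 1) == (3, 40)
assert flatten2_shape((3, 2, 5, 4), 0) == (1, 120)
assert flatten2_shape((3, 2, 3, 2, 4, 4), 4) == (36, 16)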
python/paddle/fluid/tests/unittests/xpu/test_flatten_contiguous_range_op_xpu.py
@@ -17,7 +17,6 @@ from __future__ import print_function

import sys
sys.path.append("..")
import numpy as np
import unittest
import sys

@@ -27,215 +26,214 @@ from op_test import OpTest

from op_test_xpu import XPUOpTest
import paddle
import paddle.fluid as fluid
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestFlattenOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'flatten_contiguous_range'
        self.use_dynamic_create_class = False

    class TestFlattenOp(XPUOpTest):

        def setUp(self):
            self.set_xpu()
            self.op_type = "flatten_contiguous_range"
            self.place = paddle.XPUPlace(0)
            self.use_xpu = True
            self.use_mkldnn = False
            self.start_axis = 0
            self.stop_axis = -1
            self.dtype = self.in_type
            self.init_test_case()
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {
                "Out": self.inputs["X"].reshape(self.new_shape),
                "XShape": np.random.random(self.in_shape).astype(self.dtype)
            }

        def set_xpu(self):
            self.__class__.use_xpu = True

        def test_check_output(self):
            self.check_output_with_place(self.place, no_check_set=["XShape"])

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = -1
            self.new_shape = (120)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis,
                'use_xpu': True,
            }

    class TestFlattenOp_1(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 1
            self.stop_axis = 2
            self.new_shape = (3, 10, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_2(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_3(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 2
            self.new_shape = (30, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_4(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = -2
            self.stop_axis = -1
            self.new_shape = (3, 2, 20)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_5(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 2
            self.stop_axis = 2
            self.new_shape = (3, 2, 5, 4)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOpSixDims(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.start_axis = 3
            self.stop_axis = 5
            self.new_shape = (3, 2, 3, 32)

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_Float32(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.float32

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

    class TestFlattenOp_int32(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int32

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis,
                'use_xpu': True
            }

        def test_check_grad(self):
            pass

    class TestFlattenOp_int8(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int8

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

        def test_check_grad(self):
            pass

    class TestFlattenOp_int64(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 5, 4)
            self.start_axis = 0
            self.stop_axis = 1
            self.new_shape = (6, 5, 4)
            self.dtype = np.int64

        def init_attrs(self):
            self.attrs = {
                "start_axis": self.start_axis,
                "stop_axis": self.stop_axis
            }

        def test_check_grad(self):
            pass


class TestFlatten2OpError(unittest.TestCase):

@@ -338,5 +336,11 @@ class TestFlattenPython(unittest.TestCase):

        self.assertTrue((2, 3, 16) == res_shape)


support_types = get_xpu_op_support_types('flatten_contiguous_range')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlattenOp, stype)

if __name__ == "__main__":
    unittest.main()
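For reference, the shapes asserted in the cases above map directly onto paddle.flatten's start_axis/stop_axis semantics. A small dynamic-graph sketch of the same cases (illustration only; it assumes a Paddle build with dygraph available, whereas the tests themselves run under paddle.enable_static()):

# Dygraph illustration of the start_axis / stop_axis cases checked above.
import numpy as np
import paddle

x = paddle.to_tensor(np.random.random((3, 2, 5, 4)).astype("float32"))
print(paddle.flatten(x, start_axis=0, stop_axis=-1).shape)   # [120]
print(paddle.flatten(x, start_axis=1, stop_axis=2).shape)    # [3, 10, 4]
print(paddle.flatten(x, start_axis=0, stop_axis=1).shape)    # [6, 5, 4]
print(paddle.flatten(x, start_axis=-2, stop_axis=-1).shape)  # [3, 2, 20]
print(paddle.flatten(x, start_axis=2, stop_axis=2).shape)    # [3, 2, 5, 4]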
python/paddle/fluid/tests/unittests/xpu/test_flatten_op_xpu.py
@@ -23,61 +23,74 @@ import paddle

import paddle.fluid as fluid
from op_test import OpTest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestFlattenOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'flatten'
        self.use_dynamic_create_class = False

    class TestFlattenOp(XPUOpTest):

        def setUp(self):
            self.op_type = "flatten"
            self.use_xpu = True
            self.place = paddle.XPUPlace(0)
            self.init_test_case()
            self.dtype = self.in_type
            self.inputs = {
                "X": np.random.random(self.in_shape).astype(self.dtype)
            }
            self.init_attrs()
            self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ["X"], "Out")

        def init_test_case(self):
            self.in_shape = (3, 2, 2, 10)
            self.axis = 1
            self.new_shape = (3, 40)

        def init_attrs(self):
            self.attrs = {"axis": self.axis}

    class TestFlattenOp1(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 2, 10)
            self.axis = 0
            self.new_shape = (1, 120)

    class TestFlattenOpWithDefaultAxis(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (10, 2, 2, 3)
            self.new_shape = (10, 12)

        def init_attrs(self):
            self.attrs = {}

    class TestFlattenOpSixDims(TestFlattenOp):

        def init_test_case(self):
            self.in_shape = (3, 2, 3, 2, 4, 4)
            self.axis = 4
            self.new_shape = (36, 16)


support_types = get_xpu_op_support_types('flatten')
support_types_for_grad = get_xpu_op_support_types('mean')
for stype in support_types:
    if stype in support_types_for_grad:
        create_test_class(globals(), XPUTestFlattenOp, stype)

if __name__ == "__main__":
    unittest.main()
python/paddle/fluid/tests/unittests/xpu/test_layer_norm_op_xpu.py
@@ -20,7 +20,9 @@ from functools import reduce

sys.path.append("..")
from op_test import OpTest
from op_test_xpu import XPUOpTest
from operator import mul
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()

@@ -42,77 +44,77 @@ def ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1):

    return y, mean, variance


class XPUTestLayerNormOp(XPUOpTestWrapper):

    def __init__(self):
        self.op_name = 'layer_norm'
        self.use_dynamic_create_class = False

    class TestXPULayerNormOp(XPUOpTest):

        def setUp(self):
            self.op_type = "layer_norm"
            self.dtype = self.in_type
            self.shape = [2, 3, 4, 5]
            self.epsilon = 1e-05
            self.begin_norm_axis = 1
            self.set_attrs()

            right = reduce(mul,
                           self.shape[self.begin_norm_axis:len(self.shape)], 1)
            np.random.seed(10)
            x_np = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
            scale_np = np.random.uniform(0.1, 1, [right]).astype(self.dtype)
            bias_np = np.random.uniform(0.1, 1, [right]).astype(self.dtype)
            ref_y_np, ref_mean_np, ref_variance_np = ref_layer_norm(
                x_np, scale_np, bias_np, self.epsilon, self.begin_norm_axis)

            self.inputs = {'X': x_np, 'Scale': scale_np, 'Bias': bias_np}
            self.outputs = {
                'Y': ref_y_np,
                'Mean': ref_mean_np,
                'Variance': ref_variance_np
            }
            self.attrs = {
                'begin_norm_axis': self.begin_norm_axis,
                'use_xpu': True
            }

        def set_attrs(self):
            pass

        def test_check_output(self):
            self.check_output_with_place(paddle.XPUPlace(0), atol=1e-4)

        def test_check_grad(self):
            self.check_grad_with_place(paddle.XPUPlace(0), ['X'],
                                       'Y',
                                       max_relative_error=0.02)

    class TestXPULayerNormOpAxis2(TestXPULayerNormOp):

        def set_attrs(self):
            self.begin_norm_axis = 2

    class TestXPULayerNormOpAxis3(TestXPULayerNormOp):

        def set_attrs(self):
            self.begin_norm_axis = 3

    class TestXPULayerNormOp2D(TestXPULayerNormOp):

        def set_attrs(self):
            self.shape = [10, 12]

    class TestXPULayerNormOp3D(TestXPULayerNormOp):

        def set_attrs(self):
            self.shape = [4, 5, 6]


support_types = get_xpu_op_support_types('layer_norm')
for stype in support_types:
    create_test_class(globals(), XPUTestLayerNormOp, stype)

if __name__ == "__main__":
    unittest.main()
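The hunk header above only shows the signature of the NumPy reference, ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1), not its body. A plausible sketch of such a reference under the usual layer-norm definition (the body below is an assumption for illustration, not the code from this file):

# Plausible NumPy reference matching the signature and return values shown
# in the hunk header; the actual body is outside the displayed diff.
import numpy as np


def ref_layer_norm(x, scale, bias, epsilon, begin_norm_axis=1):
    x_shape = x.shape
    # Collapse to [left, right] around begin_norm_axis and normalize each row.
    left = int(np.prod(x_shape[0:begin_norm_axis]))
    right = int(np.prod(x_shape[begin_norm_axis:]))
    x_2d = x.reshape(left, right)
    mean = np.mean(x_2d, axis=1)
    variance = np.var(x_2d, axis=1)
    y = (x_2d - mean.reshape(left, 1)) / np.sqrt(
        variance.reshape(left, 1) + epsilon)
    y = scale.reshape(1, right) * y + bias.reshape(1, right)
    return y.reshape(x_shape), mean, variance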