Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit c5179772, authored Feb 18, 2022 by z8hanghuan, committed by GitHub on Feb 18, 2022.
new way of unit test, *test=kunlun (#39650)

* new way of unit test, *test=kunlun
* new way of ut, *test=kunlun
Parent: dc39eb18
2 changed files with 268 additions and 304 deletions (+268 −304)
python/paddle/fluid/tests/unittests/xpu/test_adam_op_xpu.py    +146 −177
python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py  +122 −127
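Both diffs below make the same structural move: the hand-written, float32-only test classes are replaced by an XPUOpTestWrapper subclass whose inner classes hold the test logic, and concrete unittest classes are then generated once per supported dtype. A minimal sketch of that registration step, mirroring the calls at the bottom of each rewritten file (the internals of create_test_class are not shown in this commit; that it injects the dtype as `in_type` is inferred from how the tests read `self.in_type`):

    # Sketch only: dtype-parameterized registration as used by both files below.
    # XPUTestAdamOp is the wrapper class defined in the first diff; how
    # create_test_class builds the classes internally is an assumption here.
    from xpu.get_test_cover_info import (create_test_class,
                                         get_xpu_op_support_types,
                                         XPUOpTestWrapper)

    support_types = get_xpu_op_support_types('adam')  # dtypes the XPU adam kernel supports
    for stype in support_types:
        # One concrete unittest class per dtype is generated into this
        # module's namespace; each generated class sees its dtype as
        # self.in_type, which init_dtype() copies into self.dtype.
        create_test_class(globals(), XPUTestAdamOp, stype)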
python/paddle/fluid/tests/unittests/xpu/test_adam_op_xpu.py (view file @ c5179772)

@@ -23,163 +23,175 @@ from paddle.fluid.op import Operator
 import paddle.fluid as fluid
 import paddle
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
 
-class TestAdamOp1(OpTest):
-    def setUp(self):
-        '''Test Adam Op with supplied attributes
-        '''
-        self.op_type = "adam"
-        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        # The second moment is positive
-        moment2 = np.random.random((102, 105)).astype("float32")
-
-        learning_rate = 0.004
-        beta1 = 0.78
-        beta2 = 0.836
-        epsilon = 1e-4
-        beta1_pow = beta1**10
-        beta2_pow = beta2**10
-
-        self.inputs = {
-            'Param': param,
-            'Grad': grad,
-            'Moment1': moment1,
-            'Moment2': moment2,
-            'LearningRate': np.array([learning_rate]).astype("float32"),
-            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
-            'Beta2Pow': np.array([beta2_pow]).astype("float32")
-        }
-
-        self.attrs = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
-
-        param_out, moment1_out, \
-            moment2_out = adam_step(self.inputs, self.attrs)
-
-        self.outputs = {
-            'Moment1Out': moment1_out,
-            'Moment2Out': moment2_out,
-            'ParamOut': param_out,
-            'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
-            'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2
-        }
-
-    def test_check_output(self):
-        self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)
-
+
+class XPUTestAdamOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'adam'
+        self.use_dynamic_create_class = False
+
+    class TestAdamOp(XPUOpTest):
+        '''Test Adam Op with supplied attributes
+        '''
+
+        def setUp(self):
+            self.init_dtype()
+            self.set_xpu()
+            self.op_type = "adam"
+            self.place = paddle.XPUPlace(0)
+            self.set_data()
+            self.set_attrs()
+            self.set_shape()
+            self.set_inputs()
+            self.set_steps()
+            param_out, moment1_out, \
+                moment2_out = adam_step(self.inputs, self.attrs)
+
+            self.outputs = {
+                'Moment1Out': moment1_out,
+                'Moment2Out': moment2_out,
+                'ParamOut': param_out,
+                'Beta1PowOut':
+                np.array([self.beta1_pow]).astype("float32") * self.beta1,
+                'Beta2PowOut':
+                np.array([self.beta2_pow]).astype("float32") * self.beta2
+            }
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+            self.__class__.op_type = self.in_type
+
+        def test_check_output(self):
+            self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def set_attrs(self):
+            self.attrs = {
+                'epsilon': self.epsilon,
+                'beta1': self.beta1,
+                'beta2': self.beta2
+            }
+
+        def set_data(self):
+            self.beta1 = 0.78
+            self.beta2 = 0.836
+            self.learning_rate = 0.004
+            self.epsilon = 1e-4
+
+        def set_steps(self):
+            self.num_steps = 1
+
+        def set_shape(self):
+            self.shape = (102, 105)
+
+        def set_inputs(self):
+            param = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+            grad = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+            moment1 = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+            # The second moment is positive
+            moment2 = np.random.random(self.shape).astype(self.dtype)
+            self.beta1_pow = self.beta1**10
+            self.beta2_pow = self.beta2**10
+            self.inputs = {
+                'Param': param,
+                'Grad': grad,
+                'Moment1': moment1,
+                'Moment2': moment2,
+                'LearningRate': np.array([self.learning_rate]).astype("float32"),
+                'Beta1Pow': np.array([self.beta1_pow]).astype("float32"),
+                'Beta2Pow': np.array([self.beta2_pow]).astype("float32")
+            }
+
-class TestAdamOp2(OpTest):
-    def setUp(self):
-        '''Test Adam Op with supplied attributes
-        '''
-        self.op_type = "adam"
-        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        # The second moment is positive
-        moment2 = np.random.random((102, 105)).astype("float32")
-
-        learning_rate = 0.001
-        beta1 = 0.9
-        beta2 = 0.999
-        epsilon = 1e-8
-        beta1_pow = beta1**10
-        beta2_pow = beta2**10
-
-        self.inputs = {
-            'Param': param,
-            'Grad': grad,
-            'Moment1': moment1,
-            'Moment2': moment2,
-            'LearningRate': np.array([learning_rate]).astype("float32"),
-            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
-            'Beta2Pow': np.array([beta2_pow]).astype("float32")
-        }
-
-        attributes = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
-
-        param_out, moment1_out, \
-            moment2_out = adam_step(self.inputs, attributes)
-
-        self.outputs = {
-            'Moment1Out': moment1_out,
-            'Moment2Out': moment2_out,
-            'ParamOut': param_out,
-            'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
-            'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2
-        }
-
-    def test_check_output(self):
-        self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)
-
+    class TestAdamOp2(TestAdamOp):
+        '''Test Adam Op with supplied attributes
+        '''
+
+        def set_data(self):
+            self.beta1 = 0.9
+            self.beta2 = 0.999
+            self.learning_rate = 0.001
+            self.epsilon = 1e-8
+
+    class TestAdamOp3(TestAdamOp2):
+        '''Test Adam Op with supplied attributes
+        '''
+
+        def set_shape(self):
+            self.shape = (101, 47)
+
+    class TestAdamOp4(TestAdamOp2):
+        '''Test Adam Op with supplied attributes
+        '''
+
+        def set_shape(self):
+            self.shape = (512, 26)
+
+    class TestAdamOp5(TestAdamOp2):
+        '''Test Adam Op with supplied attributes
+        '''
+
+        def set_shape(self):
+            self.shape = (11, 1)
+
+    class TestAdamOp6(TestAdamOp2):
+        '''Test Adam Op with beta as Variable
+        '''
+
+        def set_shape(self):
+            self.shape = (10, 10)
+
+        def set_data(self):
+            self.beta1 = 0.85
+            self.beta2 = 0.95
+            self.learning_rate = 0.001
+            self.epsilon = 1e-8
+
-class TestAdamOpMultipleSteps(OpTest):
-    def setUp(self):
-        '''Test Adam Operator with supplied attributes
-        '''
-        self.op_type = "adam"
-        self.num_steps = 10
-
-        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        # The second moment is positive
-        moment2 = np.random.random((102, 105)).astype("float32")
-
-        learning_rate = 0.001
-        self.beta1 = 0.9
-        self.beta2 = 0.999
-        epsilon = 1e-8
-        self.beta1_pow = self.beta1**10
-        self.beta2_pow = self.beta2**10
-
-        self.inputs = {
-            'Param': param,
-            'Grad': grad,
-            'Moment1': moment1,
-            'Moment2': moment2,
-            'LearningRate': np.array([learning_rate]).astype("float32"),
-            'Beta1Pow': np.array([self.beta1_pow]).astype("float32"),
-            'Beta2Pow': np.array([self.beta2_pow]).astype("float32")
-        }
-
-        self.attrs = {
-            'epsilon': epsilon,
-            'beta1': self.beta1,
-            'beta2': self.beta2
-        }
-
-    def test_check_output(self):
-        for _ in range(self.num_steps):
-            param_out, moment1_out, \
-                moment2_out = adam_step(self.inputs, self.attrs)
-
-            beta1_pow_out = self.inputs['Beta1Pow'] * self.beta1
-            beta2_pow_out = self.inputs['Beta2Pow'] * self.beta2
-            self.outputs = {
-                'Moment1Out': moment1_out,
-                'Moment2Out': moment2_out,
-                'ParamOut': param_out,
-                'Beta1PowOut': beta1_pow_out,
-                'Beta2PowOut': beta2_pow_out
-            }
-
-            # Verify output for this step
-            self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)
-
-            # Output of this step becomes input for next step
-            self.inputs['Param'] = param_out
-            self.inputs['Moment1'] = moment1_out
-            self.inputs['Moment2'] = moment2_out
-
-            # Update powers of Beta1 and Beta2 for next time step
-            self.inputs['Beta1Pow'] = beta1_pow_out
-            self.inputs['Beta2Pow'] = beta2_pow_out
-
-            # Randomize gradient for next step
-            self.inputs['Grad'] = np.random.uniform(
-                -1, 1, (102, 105)).astype("float32")
+    class TestAdamOpMultipleSteps(TestAdamOp2):
+        '''Test Adam Operator with supplied attributes
+        '''
+
+        def set_steps(self):
+            self.num_steps = 10
+
+        def test_check_output(self):
+            for _ in range(self.num_steps):
+                param_out, moment1_out, \
+                    moment2_out = adam_step(self.inputs, self.attrs)
+
+                beta1_pow_out = self.inputs['Beta1Pow'] * self.beta1
+                beta2_pow_out = self.inputs['Beta2Pow'] * self.beta2
+                self.outputs = {
+                    'Moment1Out': moment1_out,
+                    'Moment2Out': moment2_out,
+                    'ParamOut': param_out,
+                    'Beta1PowOut': beta1_pow_out,
+                    'Beta2PowOut': beta2_pow_out
+                }
+
+                # Verify output for this step
+                self.check_output_with_place(
+                    place=paddle.XPUPlace(0), atol=1e-2)
+
+                # Output of this step becomes input for next step
+                self.inputs['Param'] = param_out
+                self.inputs['Moment1'] = moment1_out
+                self.inputs['Moment2'] = moment2_out
+
+                # Update powers of Beta1 and Beta2 for next time step
+                self.inputs['Beta1Pow'] = beta1_pow_out
+                self.inputs['Beta2Pow'] = beta2_pow_out
+
+                # Randomize gradient for next step
+                self.inputs['Grad'] = np.random.uniform(
+                    -1, 1, (102, 105)).astype("float32")
 
 def adam_step(inputs, attributes):
@@ -354,52 +366,9 @@ class TestSparseAdamOp(unittest.TestCase):
         self.check_with_place(paddle.XPUPlace(0), False)
 
-class TestAdamOpBetaVariable(OpTest):
-    def setUp(self):
-        '''Test Adam Op with beta as Variable
-        '''
-        self.op_type = "adam"
-        param = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        grad = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        moment1 = np.random.uniform(-1, 1, (102, 105)).astype("float32")
-        # The second moment is positive
-        moment2 = np.random.random((102, 105)).astype("float32")
-        beta1 = 0.85
-        beta2 = 0.95
-
-        learning_rate = 0.001
-        epsilon = 1e-8
-        beta1_pow = beta1**10
-        beta2_pow = beta2**10
-
-        self.inputs = {
-            'Param': param,
-            'Grad': grad,
-            'Moment1': moment1,
-            'Moment2': moment2,
-            'LearningRate': np.array([learning_rate]).astype("float32"),
-            'Beta1Pow': np.array([beta1_pow]).astype("float32"),
-            'Beta2Pow': np.array([beta2_pow]).astype("float32"),
-            "Beta1Tensor": np.array([beta1]).astype("float32"),
-            "Beta2Tensor": np.array([beta2]).astype("float32"),
-        }
-
-        attributes = {'epsilon': epsilon, 'beta1': beta1, 'beta2': beta2}
-
-        param_out, moment1_out, \
-            moment2_out = adam_step(self.inputs, attributes)
-
-        self.outputs = {
-            'Moment1Out': moment1_out,
-            'Moment2Out': moment2_out,
-            'ParamOut': param_out,
-            'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
-            'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2
-        }
-
-    def test_check_output(self):
-        self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)
-
+support_types = get_xpu_op_support_types('adam')
+for stype in support_types:
+    create_test_class(globals(), XPUTestAdamOp, stype)
 
 if __name__ == "__main__":
     paddle.enable_static()
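For reference, the expected outputs above are produced by adam_step, whose body is elided in this view. A NumPy sketch of the standard Adam update it is expected to perform, assuming the textbook formula (the names follow the test's input and attribute keys; this is a reconstruction, not the file's actual code):

    import numpy as np

    def adam_step_sketch(inputs, attributes):
        # Standard Adam update: biased first/second moments, then a
        # bias-corrected step size folded into the learning rate.
        param, grad = inputs['Param'], inputs['Grad']
        moment1, moment2 = inputs['Moment1'], inputs['Moment2']
        lr = inputs['LearningRate']
        beta1_pow, beta2_pow = inputs['Beta1Pow'], inputs['Beta2Pow']
        beta1, beta2 = attributes['beta1'], attributes['beta2']
        epsilon = attributes['epsilon']

        moment1_out = beta1 * moment1 + (1 - beta1) * grad
        moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
        lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
        param_out = param - lr_t * (moment1_out / (np.sqrt(moment2_out) + epsilon))
        return param_out, moment1_out, moment2_out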
python/paddle/fluid/tests/unittests/xpu/test_concat_op_xpu.py (view file @ c5179772)
@@ -20,136 +20,131 @@ sys.path.append("..")
 import unittest
 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
-from op_test_xpu import XPUOpTest
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard, core
 import paddle
 
-class TestConcatOp(XPUOpTest):
-    def setUp(self):
-        self.op_type = "concat"
-        self.dtype = self.get_dtype()
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.init_test_data()
-        self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
-        self.attrs = {'axis': self.axis}
-        if self.axis < 0:
-            self.actual_axis = self.axis + len(self.x0.shape)
-            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
-        else:
-            self.actual_axis = self.axis
-
-        self.outputs = {
-            'Out': np.concatenate(
-                (self.x0, self.x1, self.x2), axis=self.actual_axis)
-        }
-
-    def get_dtype(self):
-        return "float32"
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['x0'], 'Out')
-            self.check_grad_with_place(place, ['x1'], 'Out')
-            self.check_grad_with_place(place, ['x2'], 'Out')
-
-    def init_test_data(self):
-        self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
-        self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
-        self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
-        self.axis = 1
-
-class TestConcatOp2(TestConcatOp):
-    def init_test_data(self):
-        self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
-        self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
-        self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
-        self.axis = 1
-
-@skip_check_grad_ci(
-    reason="The function 'check_grad' for large inputs is too slow.")
-class TestConcatOp3(TestConcatOp):
-    def init_test_data(self):
-        self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
-        self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
-        self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
-        self.axis = 1
-
-    def test_check_grad(self):
-        pass
-
-@skip_check_grad_ci(
-    reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
-)
-class TestConcatOp4(TestConcatOp):
-    def init_test_data(self):
-        self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
-        self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
-        self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
-        self.axis = 0
-
-    def test_check_grad(self):
-        pass
-
-class TestConcatOp5(TestConcatOp):
-    def init_test_data(self):
-        self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
-        self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
-        self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
-        self.axis = -3
-
-class TestConcatOp6(TestConcatOp):
-    def setUp(self):
-        self.op_type = "concat"
-        self.dtype = self.get_dtype()
-        self.init_test_data()
-        self.lod = [[20, 80]]
-        self.out_lod = [[20, 80, 20, 80, 20, 80]]
-        self.inputs = {
-            'X': [('x0', (self.x0, self.lod)), ('x1', (self.x1, self.lod)),
-                  ('x2', (self.x2, self.lod))]
-        }
-        self.attrs = {'axis': self.axis}
-        if self.axis < 0:
-            self.actual_axis = self.axis + len(self.x0.shape)
-            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
-        else:
-            self.actual_axis = self.axis
-        out = np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
-        self.outputs = {'Out': (out, self.out_lod)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place, check_dygraph=False)
-
-    def test_check_grad(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_grad_with_place(place, ['x0'], 'Out')
-            self.check_grad_with_place(place, ['x1'], 'Out')
-            self.check_grad_with_place(place, ['x2'], 'Out')
-
-    def init_test_data(self):
-        self.x0 = np.random.random([100]).astype(self.dtype)
-        self.x1 = np.random.random([100]).astype(self.dtype)
-        self.x2 = np.random.random([100]).astype(self.dtype)
-        self.axis = 0
+from op_test import OpTest, skip_check_grad_ci
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+
+class XPUTestConcatOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'concat'
+        self.use_dynamic_create_class = False
+
+    class TestConcatOp(XPUOpTest):
+        def setUp(self):
+            self.set_xpu()
+            self.op_type = "concat"
+            self.place = paddle.XPUPlace(0)
+            self.init_dtype()
+            self.init_axis()
+            self.set_inputs()
+            self.inputs = {
+                'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]
+            }
+            self.attrs = {'axis': self.axis}
+            if self.axis < 0:
+                self.actual_axis = self.axis + len(self.x0.shape)
+                self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
+            else:
+                self.actual_axis = self.axis
+            self.outputs = {
+                'Out': np.concatenate(
+                    (self.x0, self.x1, self.x2), axis=self.actual_axis)
+            }
+
+        def set_inputs(self):
+            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
+            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
+            self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
+
+        def set_xpu(self):
+            self.__class__.use_xpu = True
+            self.__class__.no_need_check_grad = True
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def init_axis(self):
+            self.axis = -1
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+        def test_check_grad(self):
+            if paddle.is_compiled_with_xpu():
+                place = paddle.XPUPlace(0)
+                self.check_grad_with_place(place, ['x0'], 'Out')
+                self.check_grad_with_place(place, ['x1'], 'Out')
+                self.check_grad_with_place(place, ['x2'], 'Out')
+
+    class TestConcatOpAxis0XPU(TestConcatOp):
+        def init_axis(self):
+            self.axis = 0
+
+    class TestConcatOpAxis1XPU(TestConcatOp):
+        def set_inputs(self):
+            self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
+            self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
+            self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
+
+        def init_axis(self):
+            self.axis = 1
+
+    class TestConcatOpAxis2XPU(TestConcatOp):
+        def init_axis(self):
+            self.axis = 2
+
+    class TestConcatOpAxis3XPU(TestConcatOp):
+        def init_axis(self):
+            self.axis = 3
+
+    class TestConcatOpAxisNeg1XPU(TestConcatOp):
+        def init_axis(self):
+            self.axis = -1
+
+    class TestConcatOpAxisNeg2XPU(TestConcatOp):
+        def init_axis(self):
+            self.axis = -2
+
+    class TestConcatOpAxisNeg3XPU(TestConcatOp):
+        def init_axis(self):
+            self.axis = -3
+
+    @skip_check_grad_ci(
+        reason="The function 'check_grad' for large inputs is too slow.")
+    class TestConcatOp3(TestConcatOp):
+        def set_inputs(self):
+            self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
+            self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
+            self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
+            self.axis = 1
+
+        def test_check_grad(self):
+            pass
+
+    @skip_check_grad_ci(
+        reason="This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
+    )
+    class TestConcatOp4(TestConcatOp):
+        def set_inputs(self):
+            self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
+            self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
+            self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
+            self.axis = 0
+
+        def test_check_grad(self):
+            pass
+
+support_types = get_xpu_op_support_types('concat')
+for stype in support_types:
+    create_test_class(globals(), XPUTestConcatOp, stype)
 
 if __name__ == '__main__':
     paddle.enable_static()
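One detail both the old and new concat tests share is the normalization of a negative concat axis before computing the expected result with np.concatenate. A standalone sketch of that actual_axis computation (the helper name is hypothetical; the logic mirrors the setUp methods above):

    def normalize_concat_axis(axis, ndim):
        # Mirrors the actual_axis logic in the tests above: a negative axis
        # counts back from the last dimension, floored at 0.
        if axis < 0:
            actual = axis + ndim
            return actual if actual > 0 else 0
        return axis

    # e.g. axis=-3 on a 4-D input concatenates along dimension 1:
    assert normalize_concat_axis(-3, 4) == 1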