Commit 139a30ec (unverified) — BaiXuePrincess / Paddle, forked from PaddlePaddle / Paddle
Authored on Mar 25, 2022 by Zhangjingyu06; committed via GitHub on Mar 25, 2022
Parent: 04087012

    modify unit test in bn, stack and split. *test=kunlun (#40880)

4 changed files with 437 additions and 485 deletions (+437 -485):
    paddle/fluid/operators/batch_norm_op_xpu.cc                          +75   -96
    python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py    +198  -195
    python/paddle/fluid/tests/unittests/xpu/test_split_op_xpu.py         +66   -100
    python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py         +98   -94
paddle/fluid/operators/batch_norm_op_xpu.cc (view file @ 139a30ec)

-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 ...
@@ -38,25 +38,15 @@ class BatchNormXPUKernel : public framework::OpKernel<T> {
     bool global_stats = test_mode || use_global_stats;
     const auto &data_layout_str = ctx.Attr<std::string>("data_layout");
     const auto data_layout = framework::StringToDataLayout(data_layout_str);
     PADDLE_ENFORCE_EQ(data_layout_str == "NCHW" || data_layout_str == "NHWC",
                       true,
                       platform::errors::InvalidArgument(
                           "The 'data_layout' attribute must be NCHW or NHWC. "
                           "But recevived 'data_layout' is [%s].",
                           data_layout_str));
     const auto *x = ctx.Input<Tensor>("X");
     const auto &x_dims = x->dims();
     PADDLE_ENFORCE_EQ(
         x_dims.size() >= 2 && x_dims.size() <= 5, true,
         platform::errors::InvalidArgument(
             "The size of input's dimensions should be between 2 and 5"
             "But received: the size of input's dimensions is [%d]",
             x_dims.size()));
-    int N, C, H, W, D;
-    ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
+    int temp = x_dims[3];
+    temp = (x_dims.size() != 4) ? 1 : temp;
+    bool is_nchw = (data_layout == DataLayout::kNCHW);
+    const int N = x_dims[0];
+    const int C = is_nchw ? x_dims[1] : temp;
+    const int H = is_nchw ? x_dims[2] : x_dims[1];
+    const int W = is_nchw ? temp : x_dims[2];
     const auto *scale = ctx.Input<Tensor>("Scale");
     const auto *bias = ctx.Input<Tensor>("Bias");
     const auto *x_data = x->data<T>();
...
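Note: both kernels now read N/C/H/W straight out of x_dims rather than going through ExtractNCWHD, which makes the layout handling easy to check by hand. A minimal Python sketch of the same index logic (illustrative only, not Paddle code; extract_nchw is a made-up name):

    # Mirrors the C++ dim extraction above. For NCHW, dims are (N, C, H, W);
    # for NHWC they are (N, H, W, C). "temp" holds dims[3] for 4-D input,
    # else 1, exactly as in the kernel.
    def extract_nchw(dims, is_nchw):
        temp = dims[3] if len(dims) == 4 else 1
        N = dims[0]
        C = dims[1] if is_nchw else temp
        H = dims[2] if is_nchw else dims[1]
        W = temp if is_nchw else dims[2]
        return N, C, H, W

    assert extract_nchw((2, 3, 4, 5), True) == (2, 3, 4, 5)   # NCHW input
    assert extract_nchw((2, 4, 5, 3), False) == (2, 3, 4, 5)  # NHWC input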
@@ -77,7 +67,6 @@ class BatchNormXPUKernel : public framework::OpKernel<T> {
     saved_variance->mutable_data<float>(ctx.GetPlace());
     auto &dev_ctx = ctx.template device_context<DeviceContext>();
-    bool is_nchw = data_layout_str == "NCHW";
     if (!global_stats) {
       auto *mean_out_data = mean_out->data<float>();
...
@@ -94,15 +83,27 @@ class BatchNormXPUKernel : public framework::OpKernel<T> {
                                         &mom_cpu);
         momentum = mom_tensor->data<float>()[0];
       }
-      int r = xpu::batch_norm<T>(dev_ctx.x_context(), x_data, y_data, N, C, H,
-                                 W, epsilon, momentum, scale_data, bias_data,
-                                 saved_mean_data, saved_variance_data,
-                                 mean_out_data, variance_out_data, is_nchw);
-      PADDLE_ENFORCE_EQ(r, xpu::Error_t::SUCCESS,
-                        platform::errors::External(
-                            "The batch_norm XPU API return wrong value[%d %s]",
-                            r, XPUAPIErrorMsg[r]));
+      if (C == 1) {
+        int r = xpu::batch_norm<T>(dev_ctx.x_context(), x_data, y_data, N, 1,
+                                   H, W, epsilon, momentum, scale_data,
+                                   bias_data, saved_mean_data,
+                                   saved_variance_data, mean_out_data,
+                                   variance_out_data, true);
+        PADDLE_ENFORCE_EQ(
+            r, xpu::Error_t::SUCCESS,
+            platform::errors::External(
+                "The batch_norm XPU API return wrong value[%d %s]", r,
+                XPUAPIErrorMsg[r]));
+      } else {
+        int r = xpu::batch_norm<T>(dev_ctx.x_context(), x_data, y_data, N, C,
+                                   H, W, epsilon, momentum, scale_data,
+                                   bias_data, saved_mean_data,
+                                   saved_variance_data, mean_out_data,
+                                   variance_out_data, is_nchw);
+        PADDLE_ENFORCE_EQ(
+            r, xpu::Error_t::SUCCESS,
+            platform::errors::External(
+                "The batch_norm XPU API return wrong value[%d %s]", r,
+                XPUAPIErrorMsg[r]));
+      }
     } else {
       const auto *mean = ctx.Input<Tensor>("Mean");
       const auto *variance = ctx.Input<Tensor>("Variance");
...
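Note: the new C == 1 branch pins the layout flag to true. With a single channel, NHWC and NCHW describe the same memory layout, so dispatching the one-channel case as NCHW is mathematically safe; a small NumPy check of that equivalence (illustrative only, not Paddle code):

    import numpy as np

    # With C == 1, an NHWC tensor normalized over (N, H, W) matches the same
    # data normalized as NCHW over (N, H, W): there is only one channel plane.
    x_nhwc = np.random.rand(2, 4, 5, 1).astype(np.float32)  # N, H, W, C (C=1)
    x_nchw = np.transpose(x_nhwc, (0, 3, 1, 2))             # N, C, H, W

    def bn_train(x, axes, eps=1e-5):
        mean = x.mean(axis=axes, keepdims=True)
        var = x.var(axis=axes, keepdims=True)
        return (x - mean) / np.sqrt(var + eps)

    y_nhwc = bn_train(x_nhwc, axes=(0, 1, 2))  # reduce over N, H, W
    y_nchw = bn_train(x_nchw, axes=(0, 2, 3))  # reduce over N, H, W
    assert np.allclose(np.transpose(y_nhwc, (0, 3, 1, 2)), y_nchw, atol=1e-6)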
@@ -110,7 +111,7 @@ class BatchNormXPUKernel : public framework::OpKernel<T> {
       const auto *variance_data = variance->data<float>();
       int r = xpu::batch_norm_infer(dev_ctx.x_context(), x_data, y_data, N, C,
                                     H, W, epsilon, scale_data, bias_data,
-                                    mean_data, variance_data, is_nchw);
+                                    mean_data, variance_data, true);
       PADDLE_ENFORCE_EQ(
           r, xpu::Error_t::SUCCESS,
           platform::errors::External(
...
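Note: in the global-stats path, xpu::batch_norm_infer applies the running statistics elementwise; ref_batch_norm_infer in the test file checks the same formula. A NumPy sketch for NCHW, assuming the standard inference-mode batch-norm definition (illustrative only):

    import numpy as np

    # y = scale * (x - mean) / sqrt(variance + epsilon) + bias,
    # with per-channel statistics broadcast over (N, H, W).
    def batch_norm_infer_nchw(x, scale, bias, mean, variance, epsilon=1e-5):
        shape = (1, x.shape[1], 1, 1)  # broadcast per-channel statistics
        inv_std = 1.0 / np.sqrt(variance.reshape(shape) + epsilon)
        return (scale.reshape(shape) * (x - mean.reshape(shape)) * inv_std
                + bias.reshape(shape))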
@@ -171,13 +172,6 @@ class BatchNormGradXPUKernel : public framework::OpKernel<T> {
     const float epsilon = ctx.Attr<float>("epsilon");
     const auto data_layout = framework::StringToDataLayout(data_layout_str);
-    PADDLE_ENFORCE_EQ(data_layout_str == "NCHW" || data_layout_str == "NHWC",
-                      true,
-                      platform::errors::InvalidArgument(
-                          "The 'data_layout' attribute must be NCHW or NHWC. "
-                          "But recevived 'data_layout' is [%s].",
-                          data_layout_str));
     auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
     auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
...
@@ -210,15 +204,13 @@ class BatchNormGradXPUKernel : public framework::OpKernel<T> {
     }
     const auto &x_dims = x->dims();
     PADDLE_ENFORCE_EQ(
         x_dims.size() >= 2 && x_dims.size() <= 5, true,
         platform::errors::InvalidArgument(
             "The size of input's dimensions should be between 2 and 5"
             "But received: the size of input's dimensions is [%d]",
             x_dims.size()));
-    int N, C, H, W, D;
-    ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
+    int temp = x_dims[3];
+    temp = (x_dims.size() != 4) ? 1 : temp;
+    bool is_nchw = (data_layout == DataLayout::kNCHW);
+    const int N = x_dims[0];
+    const int C = is_nchw ? x_dims[1] : temp;
+    const int H = is_nchw ? x_dims[2] : x_dims[1];
+    const int W = is_nchw ? temp : x_dims[2];
     const auto *x_data = x->data<T>();
     const auto *d_y_data = d_y->data<T>();
...
@@ -243,45 +235,42 @@ class BatchNormGradXPUKernel : public framework::OpKernel<T> {
             "the size of scale's dimensions is [%d], the dimensions of scale "
             "is [%s].",
             scale->dims().size(), scale->dims()));
     PADDLE_ENFORCE_EQ(
         scale->dims()[0], C,
         platform::errors::InvalidArgument(
             "The first dimension of scale must equal to Channels[%d]. But "
             "received: the first dimension of scale is [%d]",
             C, scale->dims()[0]));
     auto &dev_ctx = ctx.template device_context<DeviceContext>();
     xpu::ctx_guard RAII_GUARD(dev_ctx.x_context());
-    const auto *batch_mean = ctx.Input<Tensor>("SavedMean");
-    const auto *batch_inv_std = ctx.Input<Tensor>("SavedVariance");
-    const auto *global_mean = ctx.Input<Tensor>("Mean");
-    const auto *global_var = ctx.Input<Tensor>("Variance");
+    const T *mean_data = nullptr;
+    const T *inv_var_data = nullptr;
+    // TODO(guozibin): hadle the situation case of N * H * W = 1
+    if (!use_global_stats) {
+      const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
+      // SavedVariance have been reverted in forward operator
+      const auto *saved_inv_variance = ctx.Input<Tensor>("SavedVariance");
+      mean_data = saved_mean->data<float>();
+      inv_var_data = saved_inv_variance->data<float>();
+    } else {
+      const auto *running_mean = ctx.Input<Tensor>("Mean");
+      const auto *running_variance = ctx.Input<Tensor>("Variance");
+      mean_data = running_mean->data<float>();
+      inv_var_data = running_variance->data<float>();
+      float *running_inv_var_data =
+          RAII_GUARD.alloc_l3_or_gm<float>(running_variance->numel());
+      float *epsilon_data = RAII_GUARD.alloc_l3_or_gm<float>(1);
+      int r1 = calculate_inv_var(dev_ctx.x_context(), inv_var_data, epsilon, C,
+                                 epsilon_data, running_inv_var_data);
+      PADDLE_ENFORCE_EQ(r1, XPU_SUCCESS, platform::errors::External(
+                                             "XPU API(batch_norm_grad "
+                                             "calculate_inv_var function) "
+                                             "return wrong value[%d %s]",
+                                             r1, XPUAPIErrorMsg[r1]));
+      inv_var_data = running_inv_var_data;
+    }
     if (is_inplace) {
-      float *global_inv_std_data;
-      if (use_global_stats) {
-        global_inv_std_data =
-            RAII_GUARD.alloc_l3_or_gm<float>(global_var->numel());
-        float *epsilon_data = RAII_GUARD.alloc_l3_or_gm<float>(1);
-        int r1 = calculate_inv_var(dev_ctx.x_context(),
-                                   global_var->data<float>(), epsilon, C,
-                                   epsilon_data, global_inv_std_data);
-        PADDLE_ENFORCE_EQ(r1, XPU_SUCCESS, platform::errors::External(
-                                               "XPU API(batch_norm_grad "
-                                               "calculate_inv_var function) "
-                                               "return wrong value[%d %s]",
-                                               r1, XPUAPIErrorMsg[r1]));
-      }
       auto px = *x;
-      auto *inv_std_data = use_global_stats ? global_inv_std_data
-                                            : batch_inv_std->data<float>();
-      auto mean_data = use_global_stats ? global_mean->data<float>()
-                                        : batch_mean->data<float>();
       int r2 = calculate_inv_BN_Y(
           dev_ctx.x_context(), px.mutable_data<T>(ctx.GetPlace()),
-          scale->data<float>(), bias->data<float>(), mean_data, inv_std_data,
-          N, C, H * W, x->data<T>());
+          scale->data<float>(), bias->data<float>(), mean_data, inv_var_data,
+          N, C, H * W, x->data<T>());
       PADDLE_ENFORCE_EQ(r2, XPU_SUCCESS, platform::errors::External(
                                              "XPU API(batch_norm_grad "
...
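Note: the backward path now normalizes both statistics sources into one (mean_data, inv_var_data) pair. SavedVariance already holds the inverted standard deviation (the comment above says it was "reverted" in the forward operator), while the running Variance must be converted on device by calculate_inv_var. A NumPy sketch of that conversion (illustrative only; the real helper runs on the XPU):

    import numpy as np

    # What calculate_inv_var produces: the running variance converted to an
    # inverse standard deviation, so the backward kernel can consume either
    # stats source uniformly.
    def calculate_inv_var(running_variance, epsilon):
        return 1.0 / np.sqrt(running_variance + epsilon)

    # SavedVariance needs no conversion: the forward op already stored it
    # in this inverted form.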
@@ -289,29 +278,19 @@ class BatchNormGradXPUKernel : public framework::OpKernel<T> {
                                              "return wrong value[%d %s]",
                                              r2, XPUAPIErrorMsg[r2]));
     }
-    int r3;
-    bool is_nchw = data_layout_str == "NCHW";
-    if (use_global_stats) {
-      r3 = xpu::batch_norm_grad<T>(
-          dev_ctx.x_context(), x_data, d_y_data, d_x_data, N, C, H, W,
-          scale_data, nullptr, nullptr, d_scale_data, d_bias_data, is_nchw,
-          global_mean->data<float>(), global_var->data<float>(), epsilon);
-    } else {
-      if (!d_x) {
-        d_x_data = RAII_GUARD.alloc_l3_or_gm<T>(x->numel());
-      }
-      if (!d_scale) {
-        d_scale_data = RAII_GUARD.alloc_l3_or_gm<float>(C);
-      }
-      if (!d_bias_data) {
-        d_bias_data = RAII_GUARD.alloc_l3_or_gm<float>(C);
-      }
-      r3 = xpu::batch_norm_grad<T>(
-          dev_ctx.x_context(), x_data, d_y_data, d_x_data, N, C, H, W,
-          scale_data, batch_mean->data<float>(), batch_inv_std->data<float>(),
-          d_scale_data, d_bias_data, is_nchw);
-    }
+    if (!d_x) {
+      d_x_data = RAII_GUARD.alloc_l3_or_gm<T>(x->numel());
+    }
+    if (!d_scale) {
+      d_scale_data = RAII_GUARD.alloc_l3_or_gm<float>(C);
+    }
+    if (!d_bias_data) {
+      d_bias_data = RAII_GUARD.alloc_l3_or_gm<float>(C);
+    }
+    int r3 = xpu::batch_norm_grad<T>(
+        dev_ctx.x_context(), x_data, d_y_data, d_x_data, N, C, H, W,
+        scale_data, mean_data, inv_var_data, d_scale_data, d_bias_data,
+        is_nchw);
     PADDLE_ENFORCE_EQ(
         r3, XPU_SUCCESS,
         platform::errors::External("XPU API(batch_norm_grad) return "
                                    "wrong value[%d %s]",
...
python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py (view file @ 139a30ec)

-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
@@ -26,6 +26,10 @@ import paddle.fluid as fluid
 import paddle.nn as nn
 import paddle.nn.functional as F
 from paddle.fluid import compiler, Program, program_guard
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()


 def ref_batch_norm_infer(x, scale, bias, mean, variance, momentum, epsilon,
...
@@ -121,213 +125,212 @@ def ref_batch_norm_train(x, y_grad, scale, bias, mean, variance, momentum,
     return y, mean_out, variance_out, saved_mean, saved_inv_std, x_grad, scale_grad, bias_grad


-@unittest.skipIf(not paddle.is_compiled_with_xpu(),
-                 "core is not compiled with XPU")
-class TestXPUBatchNormOp(unittest.TestCase):
+class XPUTestBatchNormOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'batch_norm'
+        self.use_dynamic_create_class = False
+
+    @unittest.skipIf(not paddle.is_compiled_with_xpu(),
+                     "core is not compiled with XPU")
+    class TestBatchNormOp(unittest.TestCase):
     def setUp(self):
-        self.place = paddle.XPUPlace(0)
         self.op_type = "batch_norm"
         self.dtype = np.float32
         self.shape = [2, 3, 4, 5]
         self.data_layout = "NCHW"
         self.epsilon = 1e-05
         self.momentum = 0.9
+        self.init_dtype()
+        self.set_xpu()
         self.set_attrs()
         if self.data_layout == "NHWC":
             channel_size = self.shape[3]
         elif self.data_layout == "NCHW":
             channel_size = self.shape[1]
         else:
             raise ValueError(
                 "Unsupported data layout! Only NCHW and NHWC is supported, but received "
                 + self.data_layout)
         np.random.seed(1024)
         self.x_np = np.random.random_sample(self.shape).astype(self.dtype)
         self.scale_np = np.random.random_sample(
             [channel_size]).astype(self.dtype)
         self.bias_np = np.random.random_sample(
             [channel_size]).astype(self.dtype)
         self.mean_np = np.zeros([channel_size]).astype(self.dtype)
         self.variance_np = np.ones([channel_size]).astype(self.dtype)
         self.saved_mean_np = np.zeros([channel_size]).astype(self.dtype)
         self.saved_variance_np = np.ones([channel_size]).astype(self.dtype)

     def set_attrs(self):
         pass

+    def init_dtype(self):
+        self.dtype = self.in_type
+
+    def set_xpu(self):
+        self.__class__.use_xpu = True
+        self.__class__.op_type = self.in_type
+        self.place = paddle.XPUPlace(0)
+
     def test_infer(self):
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
             scale = paddle.fluid.data('Scale', self.scale_np.shape,
                                       self.scale_np.dtype)
             bias = paddle.fluid.data('Bias', self.bias_np.shape,
                                      self.bias_np.dtype)
             mean = paddle.fluid.data('Mean', self.mean_np.shape,
                                      self.mean_np.dtype)
             variance = paddle.fluid.data('Variance', self.variance_np.shape,
                                          self.variance_np.dtype)
             y = F.batch_norm(x, mean, variance, scale, bias, False,
                              self.momentum, self.epsilon, self.data_layout)
             exe = paddle.static.Executor(self.place)
             [y_np] = exe.run(feed={
                 'X': self.x_np,
                 'Scale': self.scale_np,
                 'Bias': self.bias_np,
                 'Mean': self.mean_np,
                 'Variance': self.variance_np
             },
                              fetch_list=[y])
         y_np_ref = ref_batch_norm_infer(
             self.x_np, self.scale_np, self.bias_np, self.mean_np,
             self.variance_np, self.momentum, self.epsilon, self.data_layout)
         self.assertEqual(np.allclose(y_np_ref, y_np), True)

     def test_train(self):
         y_grad_np = np.random.random_sample(self.shape).astype(self.dtype)
         y_np, mean_out_np, variance_out_np, saved_mean_np, saved_variance_np, x_grad_np, scale_grad_np, bias_grad_np = ref_batch_norm_train(
             self.x_np, y_grad_np, self.scale_np, self.bias_np, self.mean_np,
             self.variance_np, self.momentum, self.epsilon, self.data_layout)
         inputs = {
             'X': self.x_np,
             'Scale': self.scale_np,
             'Bias': self.bias_np,
             'Mean': self.mean_np,
             'Variance': self.variance_np,
             'Y@GRAD': y_grad_np
         }
         outputs = {
             'Y': y_np,
             'Mean': mean_out_np,
             'Variance': variance_out_np,
             'SavedMean': saved_mean_np,
             'SavedVariance': saved_variance_np,
             'X@GRAD': x_grad_np,
             'Scale@GRAD': scale_grad_np,
             'Bias@GRAD': bias_grad_np
         }
         attrs = {
             'momentum': self.momentum,
             'epsilon': self.epsilon,
             'is_test': False,
             'data_layout': self.data_layout,
             'use_mkldnn': False,
             'fuse_with_relu': False,
             'use_global_stats': False,
         }
         paddle.enable_static()
         program = paddle.static.Program()
         with paddle.static.program_guard(program):
             block = program.global_block()
             # Set inputs, outputs and attributes to the forward op of batch_norm
             input_vars = {}
             for var_name in inputs:
                 arg_name = var_name
                 np_value = inputs[var_name]
                 if not block.has_var(var_name):
                     block.create_var(name=var_name,
                                      shape=np_value.shape,
                                      dtype=np_value.dtype)
                 input_vars[arg_name] = block.var(var_name)
             fetch_list = []
             output_vars = {}
             for var_name in outputs:
                 arg_name = var_name
                 np_value = outputs[var_name]
                 if not block.has_var(var_name):
                     block.create_var(name=var_name,
                                      shape=np_value.shape,
                                      dtype=np_value.dtype)
                 if var_name == 'Mean':
                     arg_name = 'MeanOut'  # Share memory
                 if var_name == 'Variance':
                     arg_name = 'VarianceOut'  # Share memory
                 output_vars[arg_name] = block.var(var_name)
                 fetch_list.append(var_name)
             batch_norm_op = block.append_op(type="batch_norm",
                                             inputs=input_vars,
                                             outputs=output_vars,
                                             attrs=attrs)
             # Generate the backward op_desc of batch_norm
             grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                 batch_norm_op.desc, set(), [])
             grad_op_desc = grad_op_desc_list[0]
             new_op_desc = block.desc.append_op()
             new_op_desc.copy_from(grad_op_desc)
             program._sync_with_cpp()
             exe = paddle.static.Executor(self.place)
             outs = exe.run(program, feed=inputs, fetch_list=fetch_list)
             for id, name in enumerate(fetch_list):
                 self.assertEqual(
                     np.allclose(outputs[name], outs[id], atol=1e-4), True)


-class TestXPUBatchNormOpUseGlobalStats(unittest.TestCase):
+class TestBatchNormOpUseGlobalStats(unittest.TestCase):
     def setUp(self):
         self.places = [paddle.XPUPlace(0)]
         self.init_test()

     ### train mode
     def init_test(self):
         self.use_global_stats = True
         self.trainable_statistics = False

     def test_global_stats(self):
         for p in self.places:
             with fluid.dygraph.guard(p):
                 x = paddle.randn([2, 6, 6, 4])
                 net1 = paddle.fluid.dygraph.BatchNorm(
                     6,
                     param_attr=fluid.ParamAttr(
                         initializer=fluid.initializer.Constant(1.0)),
                     use_global_stats=self.use_global_stats,
                     trainable_statistics=self.trainable_statistics)
                 net2 = paddle.nn.BatchNorm2D(
                     6, use_global_stats=self.use_global_stats)
                 net2.weight = net1.weight
                 net2.bias = net1.bias
                 if self.trainable_statistics == True:
                     net1.training = False
                     net2.training = False
                 y1 = net1(x)
                 y2 = net2(x)
-                self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True)
+                self.assertEqual(
+                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-4), True)


-class TestXPUBatchNormUseGlobalStatsCase1(TestXPUBatchNormOpUseGlobalStats):
-    ### test mode
-    def init_test(self):
-        self.use_global_stats = False
-        self.trainable_statistics = True
-
-
-class TestXPUBatchNormUseGlobalStatsCase2(TestXPUBatchNormOpUseGlobalStats):
-    ### train mode
-    def init_test(self):
-        self.use_global_stats = False
-        self.trainable_statistics = False
-
-
-class TestXPUBatchNormUseGlobalStatsCase3(TestXPUBatchNormOpUseGlobalStats):
-    ### test mode
-    def init_test(self):
-        self.use_global_stats = True
-        self.trainable_statistics = True
-
-
-class TestXPUBatchNormUseGlobalStatsCase4(TestXPUBatchNormOpUseGlobalStats):
-    ### train mode
-    def init_test(self):
-        self.use_global_stats = True
-        self.trainable_statistics = False
+class TestBatchNormOpUseGlobalStats1(TestBatchNormOpUseGlobalStats):
+    ### test mode
+    def init_test(self):
+        self.use_global_stats = True
+        self.trainable_statistics = True
+
+
+class TestBatchNormUseGlobalStats2(TestBatchNormOpUseGlobalStats):
+    ### train mode
+    def init_test(self):
+        self.use_global_stats = True
+        self.trainable_statistics = False
+
+
+support_types = get_xpu_op_support_types('batch_norm')
+for stype in support_types:
+    create_test_class(globals(), XPUTestBatchNormOp, stype)


-if __name__ == "__main__":
+if __name__ == '__main__':
     unittest.main()
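Note: all three test files migrate to the same pattern: the cases live inside an XPUOpTestWrapper subclass, and create_test_class from xpu/get_test_cover_info registers one concrete TestCase per dtype reported by get_xpu_op_support_types, exposing the dtype to each case as self.in_type. A minimal sketch of how such a factory can work (illustrative only; the function name and injection mechanism here are assumptions, not the real helper's implementation):

    import unittest

    # Stamp out one TestCase subclass per supported dtype, injecting the
    # dtype as a class attribute so tests can read it via self.in_type.
    def create_test_class_sketch(scope, wrapper_cls, stype):
        wrapper = wrapper_cls()  # sets wrapper.op_name, e.g. 'batch_norm'
        for name in dir(wrapper_cls):
            inner = getattr(wrapper_cls, name)
            if isinstance(inner, type) and issubclass(inner, unittest.TestCase):
                test_name = '%s_%s_%s' % (wrapper.op_name, name, stype)
                scope[test_name] = type(test_name, (inner,), {'in_type': stype})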
python/paddle/fluid/tests/unittests/xpu/test_split_op_xpu.py (view file @ 139a30ec)

-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
@@ -23,105 +23,71 @@ from op_test_xpu import XPUOpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard


-# test with attr(num)
-class TestSplitOp(XPUOpTest):
-    def initDefaultParameters(self):
-        self.dtype = 'float32'
-        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
-        self.axis = 2
-        self.sections = []
-        self.num = 3
-        self.indices_or_sections = 3
-
-    def setUp(self):
-        self.__class__.op_type = 'split'
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.initDefaultParameters()
-        self.inputs = {'X': self.x}
-        self.attrs = {
-            'axis': self.axis,
-            'sections': self.sections,
-            'num': self.num
-        }
-        out = np.split(self.x, self.indices_or_sections, self.axis)
-        self.outputs = {'Out': [('out%d' % i, out[i]) \
-            for i in range(len(out))]}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            paddle.enable_static()
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-
-# unknown sections
-class TestSplitOp_2(XPUOpTest):
-    def initDefaultParameters(self):
-        self.dtype = 'float32'
-        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
-        self.axis = 2
-        self.sections = [2, 1, -1]
-        self.num = 0
-        self.indices_or_sections = [2, 3]
-
-    def setUp(self):
-        self.__class__.op_type = 'split'
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.initDefaultParameters()
-        self.inputs = {'X': self.x}
-        self.attrs = {
-            'axis': self.axis,
-            'sections': self.sections,
-            'num': self.num
-        }
-        out = np.split(self.x, self.indices_or_sections, self.axis)
-        self.outputs = {'Out': [('out%d' % i, out[i]) \
-            for i in range(len(out))]}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            paddle.enable_static()
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-
-# test with int32
-class TestSplitOp_5(XPUOpTest):
-    def initDefaultParameters(self):
-        self.dtype = 'int32'
-        self.x = np.random.random((4, 5, 6)).astype(self.dtype)
-        self.axis = 2
-        self.sections = []
-        self.num = 3
-        self.indices_or_sections = 3
-
-    def setUp(self):
-        self.__class__.op_type = 'split'
-        self.use_xpu = True
-        self.use_mkldnn = False
-        self.initDefaultParameters()
-        self.inputs = {'X': self.x}
-        self.attrs = {
-            'axis': self.axis,
-            'sections': self.sections,
-            'num': self.num
-        }
-        out = np.split(self.x, self.indices_or_sections, self.axis)
-        self.outputs = {'Out': [('out%d' % i, out[i]) \
-            for i in range(len(out))]}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            paddle.enable_static()
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+
+class XPUTestSplitOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'split'
+        self.use_dynamic_create_class = False
+
+    # test with attr(num)
+    class TestSplitOp(XPUOpTest):
+        def setUp(self):
+            self.init_dtype()
+            self.__class__.use_xpu = True
+            self.__class__.op_type = 'split'
+            self.use_mkldnn = False
+            self.initParameters()
+            self.inputs = {'X': self.x}
+            self.attrs = {
+                'axis': self.axis,
+                'sections': self.sections,
+                'num': self.num
+            }
+            out = np.split(self.x, self.indices_or_sections, self.axis)
+            self.outputs = {'Out': [('out%d' % i, out[i]) \
+                for i in range(len(out))]}
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def initParameters(self):
+            self.x = np.random.random((4, 5, 6)).astype(self.dtype)
+            self.axis = 2
+            self.sections = []
+            self.num = 3
+            self.indices_or_sections = 3
+
+        def test_check_output(self):
+            self.check_output_with_place(paddle.XPUPlace(0))
+
+    # unknown sections
+    class TestSplitOp1(TestSplitOp):
+        def initParameters(self):
+            self.x = np.random.random((4, 5, 6)).astype(self.dtype)
+            self.axis = 2
+            self.sections = [2, 1, -1]
+            self.num = 0
+            self.indices_or_sections = [2, 3]
+
+    # test with int32
+    class TestSplitOp2(TestSplitOp):
+        def initParameters(self):
+            self.x = np.random.random((4, 5, 6)).astype(np.int32)
+            self.axis = 2
+            self.sections = []
+            self.num = 3
+            self.indices_or_sections = 3
+
+
+support_types = get_xpu_op_support_types('split')
+for stype in support_types:
+    create_test_class(globals(), XPUTestSplitOp, stype)


 if __name__ == '__main__':
     unittest.main()
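Note: the split cases keep the operator attributes (sections, num) and NumPy's indices_or_sections in sync by hand: num = 3 means three equal parts, while sections = [2, 1, -1] on a length-6 axis (with -1 inferring the remainder) corresponds to cumulative split points [2, 3]. For example:

    import numpy as np

    x = np.random.random((4, 5, 6))

    # num = 3 equal parts along axis 2  ->  indices_or_sections = 3
    parts = np.split(x, 3, axis=2)
    assert [p.shape[2] for p in parts] == [2, 2, 2]

    # sections = [2, 1, -1]: -1 infers the remainder (3), so the cumulative
    # split points are [2, 3]  ->  indices_or_sections = [2, 3]
    parts = np.split(x, [2, 3], axis=2)
    assert [p.shape[2] for p in parts] == [2, 1, 3]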
python/paddle/fluid/tests/unittests/xpu/test_stack_op_xpu.py (view file @ 139a30ec)

-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
@@ -23,116 +23,120 @@ from op_test_xpu import XPUOpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard


-@skip_check_grad_ci(reason="There is no grad kernel for stack_xpu op.")
-class TestStackOpBase(XPUOpTest):
-    def initDefaultParameters(self):
-        self.num_inputs = 4
-        self.input_dim = (5, 6, 7)
-        self.axis = 0
-        self.dtype = 'float32'
-
-    def initParameters(self):
-        pass
-
-    def get_x_names(self):
-        x_names = []
-        for i in range(self.num_inputs):
-            x_names.append('x{}'.format(i))
-        return x_names
-
-    def setUp(self):
-        self.initDefaultParameters()
-        self.initParameters()
-        self.op_type = 'stack'
-        self.x = []
-        for i in range(self.num_inputs):
-            self.x.append(
-                np.random.random(size=self.input_dim).astype(self.dtype))
-        tmp = []
-        x_names = self.get_x_names()
-        for i in range(self.num_inputs):
-            tmp.append((x_names[i], self.x[i]))
-        self.inputs = {'X': tmp}
-        self.outputs = {'Y': np.stack(self.x, axis=self.axis)}
-        self.attrs = {'axis': self.axis}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            paddle.enable_static()
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        if self.dtype == 'int64' or self.dtype == 'int32':
-            pass
-        else:
-            if paddle.is_compiled_with_xpu():
-                paddle.enable_static()
-                place = paddle.XPUPlace(0)
-                self.check_grad_with_place(place, self.get_x_names(), 'Y')
-
-
-class TestStackOp1(TestStackOpBase):
-    def initParameters(self):
-        self.num_inputs = 16
-
-
-class TestStackOp2(TestStackOpBase):
-    def initParameters(self):
-        self.num_inputs = 30
-
-
-class TestStackOp3(TestStackOpBase):
-    def initParameters(self):
-        self.axis = -1
-
-    def test_check_grad(self):
-        pass
-
-
-class TestStackOp4(TestStackOpBase):
-    def initParameters(self):
-        self.axis = -4
-
-    def test_check_grad(self):
-        pass
-
-
-class TestStackOp5(TestStackOpBase):
-    def initParameters(self):
-        self.axis = 1
-
-
-class TestStackOp6(TestStackOpBase):
-    def initParameters(self):
-        self.axis = 3
-
-
-class TestStackOpint64(TestStackOpBase):
-    def initDefaultParameters(self):
-        self.num_inputs = 4
-        self.input_dim = (5, 6, 7)
-        self.axis = 0
-        self.dtype = 'int64'
-
-    def initParameters(self):
-        self.num_inputs = 16
-
-
-class TestStackOpint(TestStackOpBase):
-    def initDefaultParameters(self):
-        self.num_inputs = 4
-        self.input_dim = (5, 6, 7)
-        self.axis = 0
-        self.dtype = 'int32'
-
-    def initParameters(self):
-        self.num_inputs = 16
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
+
+paddle.enable_static()
+
+
+class XPUTestStackOp(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'stack'
+        self.use_dynamic_create_class = False
+
+    @skip_check_grad_ci(reason="There is no grad kernel for stack_xpu op.")
+    class TestStackOp(XPUOpTest):
+        def initDefaultParameters(self):
+            self.num_inputs = 4
+            self.input_dim = (5, 6, 7)
+            self.axis = 0
+            self.dtype = np.float32
+
+        def setUp(self):
+            self.initDefaultParameters()
+            self.initParameters()
+            self.__class__.use_xpu = True
+            self.__class__.op_type = 'stack'
+            self.x = []
+            for i in range(self.num_inputs):
+                self.x.append(
+                    np.random.random(size=self.input_dim).astype(self.dtype))
+            tmp = []
+            x_names = self.get_x_names()
+            for i in range(self.num_inputs):
+                tmp.append((x_names[i], self.x[i]))
+            self.inputs = {'X': tmp}
+            self.outputs = {'Y': np.stack(self.x, axis=self.axis)}
+            self.attrs = {'axis': self.axis}
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def initParameters(self):
+            pass
+
+        def get_x_names(self):
+            x_names = []
+            for i in range(self.num_inputs):
+                x_names.append('x{}'.format(i))
+            return x_names
+
+        def test_check_output(self):
+            self.check_output_with_place(paddle.XPUPlace(0))
+
+        def test_check_grad(self):
+            if self.dtype == np.int32 or self.dtype == np.int64:
+                pass
+            else:
+                self.check_grad_with_place(paddle.XPUPlace(0),
+                                           self.get_x_names(), 'Y')
+
+    class TestStackOp1(TestStackOp):
+        def initParameters(self):
+            self.num_inputs = 16
+
+    class TestStackOp2(TestStackOp):
+        def initParameters(self):
+            self.num_inputs = 30
+
+    class TestStackOp3(TestStackOp):
+        def initParameters(self):
+            self.axis = -1
+
+        def test_check_grad(self):
+            pass
+
+    class TestStackOp4(TestStackOp):
+        def initParameters(self):
+            self.axis = -4
+
+        def test_check_grad(self):
+            pass
+
+    class TestStackOp5(TestStackOp):
+        def initParameters(self):
+            self.axis = 1
+
+    class TestStackOp6(TestStackOp):
+        def initParameters(self):
+            self.axis = 3
+
+    class TestStackOp7(TestStackOp):
+        def initParameters(self):
+            self.num_inputs = 4
+            self.input_dim = (5, 6, 7)
+            self.axis = 0
+            self.dtype = np.int64
+
+        def test_check_grad(self):
+            pass
+
+    class TestStackOp8(TestStackOp):
+        def initParameters(self):
+            self.num_inputs = 4
+            self.input_dim = (5, 6, 7)
+            self.axis = 0
+            self.dtype = np.int32
+
+        def test_check_grad(self):
+            pass
+
+
+support_types = get_xpu_op_support_types('stack')
+for stype in support_types:
+    create_test_class(globals(), XPUTestStackOp, stype)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
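Note: the stack cases vary only axis and num_inputs. np.stack inserts a new axis of length num_inputs at the given position, with negative axes counted from the end of the result's shape. For example:

    import numpy as np

    # num_inputs = 4 tensors of input_dim = (5, 6, 7)
    xs = [np.random.random((5, 6, 7)) for _ in range(4)]

    assert np.stack(xs, axis=0).shape == (4, 5, 6, 7)   # default case
    assert np.stack(xs, axis=1).shape == (5, 4, 6, 7)   # axis = 1
    assert np.stack(xs, axis=3).shape == (5, 6, 7, 4)   # axis = 3
    assert np.stack(xs, axis=-1).shape == (5, 6, 7, 4)  # axis = -1
    assert np.stack(xs, axis=-4).shape == (4, 5, 6, 7)  # axis = -4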