Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
55164761
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
55164761
编写于
1月 25, 2022
作者:
F
fwenguang
提交者:
GitHub
1月 25, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
[MLU]add mlu batch_norm kernel pytest (#39071)
上级
ac3dc0bb
变更
2
展开全部
隐藏空白更改
内联
并排
Showing
2 changed file
with
997 addition
and
0 deletion
+997
-0
python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
...addle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
+702
-0
python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
...le/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
+295
-0
未找到文件。
python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu.py
0 → 100644
浏览文件 @
55164761
此差异已折叠。
点击以展开。
python/paddle/fluid/tests/unittests/mlu/test_batch_norm_op_mlu_v2.py
0 → 100644
浏览文件 @
55164761
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
os
import
unittest
import
numpy
as
np
import
paddle.fluid.core
as
core
from
paddle.fluid.op
import
Operator
import
paddle.fluid
as
fluid
import
sys
sys
.
path
.
append
(
".."
)
from
op_test
import
OpTest
,
_set_use_system_allocator
from
paddle.fluid.framework
import
grad_var_name
import
paddle.fluid
as
fluid
from
paddle.fluid
import
Program
,
program_guard
import
paddle
class TestBatchNorm(unittest.TestCase):
    """Cross-checks of the legacy ``fluid.dygraph.BatchNorm`` layer against
    the new-style ``paddle.nn.BatchNorm*D`` layers, run on CPU and — when the
    build supports it — on MLU.
    """

    def _places(self):
        """Return the device list every test iterates over (CPU, plus MLU 0
        when compiled in). Extracted: this was duplicated in all four tests."""
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_mlu():
            places.append(fluid.MLUPlace(0))
        return places

    def test_name(self):
        """BatchNorm1D accepts an explicit layer ``name`` without raising."""
        for p in self._places():
            with fluid.dygraph.guard(p):
                batch_norm1d = paddle.nn.BatchNorm1D(1, name="test")

    def test_error(self):
        """Each BatchNorm*D layer raises ValueError when the input rank does
        not match the layer's (default or explicit) data format."""
        for p in self._places():
            def error1d_dataformat():
                # 4-D input into a 1-D layer with a 5-D data format.
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm1d = paddle.nn.BatchNorm1D(1, data_format='NCDHW')
                batch_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d_dataformat():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                batch_norm2d = paddle.nn.BatchNorm2D(1, data_format='NCDHW')
                batch_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d_dataformat():
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm3d = paddle.nn.BatchNorm3D(1, data_format='NCL')
                batch_norm3d(fluid.dygraph.to_variable(x_data_4))

            def error1d():
                # Default data format, wrong input rank.
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm1d = paddle.nn.BatchNorm1D(1)
                batch_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                batch_norm2d = paddle.nn.BatchNorm2D(1)
                batch_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
                batch_norm3d = paddle.nn.BatchNorm3D(1)
                batch_norm3d(fluid.dygraph.to_variable(x_data_4))

            with fluid.dygraph.guard(p):
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)
                self.assertRaises(ValueError, error1d_dataformat)
                self.assertRaises(ValueError, error2d_dataformat)
                self.assertRaises(ValueError, error3d_dataformat)

    def test_dygraph(self):
        """Legacy and new-style batch norm agree in dygraph mode, both with
        default attributes and with frozen scale/bias."""
        for p in self._places():
            shape = [4, 10, 4, 4]

            def compute_v1(x, is_test, trainable_statistics):
                # Legacy fluid.dygraph.BatchNorm, default scale/bias.
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                # New-style counterpart of compute_v1.
                with fluid.dygraph.guard(p):
                    bn = paddle.nn.BatchNorm2D(shape[1])
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v3(x, is_test, trainable_statistics):
                # Legacy layer with non-trainable scale=1 / bias=0 —
                # the legacy equivalent of compute_v4's weight_attr=False.
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        param_attr=fluid.ParamAttr(
                            initializer=fluid.initializer.Constant(1.0),
                            trainable=False),
                        bias_attr=fluid.ParamAttr(
                            initializer=fluid.initializer.Constant(0.0),
                            trainable=False),
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v4(x):
                # New-style layer with scale/bias disabled entirely.
                with fluid.dygraph.guard(p):
                    bn = paddle.nn.BatchNorm2D(
                        shape[1], weight_attr=False, bias_attr=False)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            y3 = compute_v3(x, False, False)
            y4 = compute_v4(x)
            self.assertTrue(np.allclose(y1, y2))
            self.assertTrue(np.allclose(y3, y4))

    def test_static(self):
        """Legacy and new-style batch norm agree when built into a static
        program and run through an Executor."""
        for p in self._places():
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np, is_test, trainable_statistics):
                with program_guard(Program(), Program()):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    x = fluid.data(
                        name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    bn = paddle.nn.BatchNorm2D(shape[1])
                    x = fluid.data(
                        name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))
class TestBatchNormChannelLast(unittest.TestCase):
    """Channel-last batch norm (NLC / NHWC / NDHWC) must match the
    channel-first result computed on a transposed copy of the same input,
    using shared scale and bias parameters.
    """

    def setUp(self):
        # Remember the process-wide default dtype so tearDown can restore
        # it (fixes the 'original_dtyep' typo that was here before).
        self._original_dtype = paddle.get_default_dtype()
        paddle.set_default_dtype("float32")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_mlu():
            self.places.append(fluid.MLUPlace(0))

    def tearDown(self):
        paddle.set_default_dtype(self._original_dtype)

    def test_1d(self):
        for p in self.places:
            with fluid.dygraph.guard(p):
                x = paddle.randn([2, 6, 4])
                net1 = paddle.nn.BatchNorm1D(4, data_format="NLC")
                net2 = paddle.nn.BatchNorm1D(4)
                # Share parameters so both layers compute with identical
                # scale/bias.
                net2.weight = net1.weight
                net2.bias = net1.bias
                y1 = net1(x)
                # Run the channel-first layer on NCL-transposed input, then
                # transpose its output back to NLC for comparison.
                channel_first_x = paddle.transpose(x, [0, 2, 1])
                y2 = net2(channel_first_x)
                y2 = paddle.transpose(y2, [0, 2, 1])
                self.assertEqual(
                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)

    def test_2d(self):
        for p in self.places:
            with fluid.dygraph.guard(p):
                x = paddle.randn([2, 6, 6, 4])
                net1 = paddle.nn.BatchNorm2D(4, data_format="NHWC")
                net2 = paddle.nn.BatchNorm2D(4)
                net2.weight = net1.weight
                net2.bias = net1.bias
                y1 = net1(x)
                # NHWC -> NCHW in, NCHW -> NHWC out.
                channel_first_x = paddle.transpose(x, [0, 3, 1, 2])
                y2 = net2(channel_first_x)
                y2 = paddle.transpose(y2, [0, 2, 3, 1])
                self.assertEqual(
                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)

    def test_3d(self):
        for p in self.places:
            with fluid.dygraph.guard(p):
                x = paddle.randn([2, 6, 6, 6, 4])
                net1 = paddle.nn.BatchNorm3D(4, data_format="NDHWC")
                net2 = paddle.nn.BatchNorm3D(4)
                net2.weight = net1.weight
                net2.bias = net1.bias
                y1 = net1(x)
                # NDHWC -> NCDHW in, NCDHW -> NDHWC out.
                channel_first_x = paddle.transpose(x, [0, 4, 1, 2, 3])
                y2 = net2(channel_first_x)
                y2 = paddle.transpose(y2, [0, 2, 3, 4, 1])
                self.assertEqual(
                    np.allclose(y1.numpy(), y2.numpy(), atol=1e-07), True)
class TestBatchNormUseGlobalStats(unittest.TestCase):
    """For each combination of ``use_global_stats`` / ``trainable_statistics``
    (subclasses override ``init_test``), the legacy ``fluid`` BatchNorm and
    ``paddle.nn.BatchNorm2D`` must produce the same output.
    """

    def setUp(self):
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_mlu():
            self.places.append(fluid.MLUPlace(0))
        self.init_test()

    ### train mode
    def init_test(self):
        # Base case; subclasses override to cover the other flag combinations.
        self.use_global_stats = True
        self.trainable_statistics = False

    def test_global_stats(self):
        for p in self.places:
            with fluid.dygraph.guard(p):
                x = paddle.randn([2, 6, 6, 4])
                net1 = paddle.fluid.dygraph.BatchNorm(
                    6,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(1.0)),
                    use_global_stats=self.use_global_stats,
                    trainable_statistics=self.trainable_statistics)
                net2 = paddle.nn.BatchNorm2D(
                    6, use_global_stats=self.use_global_stats)
                # Share parameters so any difference comes from the
                # normalization itself, not from initialization.
                net2.weight = net1.weight
                net2.bias = net1.bias
                if self.trainable_statistics:  # idiomatic: was '== True'
                    net1.training = False
                    net2.training = False
                y1 = net1(x)
                y2 = net2(x)
                self.assertEqual(np.allclose(y1.numpy(), y2.numpy()), True)
class TestBatchNormUseGlobalStatsCase1(TestBatchNormUseGlobalStats):
    """Flag variant: trainable statistics enabled, global stats disabled
    (labelled 'test mode' in the original)."""

    def init_test(self):
        # Override the flag combination exercised by the inherited test.
        self.trainable_statistics = True
        self.use_global_stats = False
class TestBatchNormUseGlobalStatsCase2(TestBatchNormUseGlobalStats):
    """Flag variant: both flags off (labelled 'train mode' in the
    original)."""

    def init_test(self):
        # Override the flag combination exercised by the inherited test.
        self.trainable_statistics = False
        self.use_global_stats = False
class TestBatchNormUseGlobalStatsCase3(TestBatchNormUseGlobalStats):
    """Flag variant: both flags on (labelled 'test mode' in the
    original)."""

    def init_test(self):
        # Override the flag combination exercised by the inherited test.
        self.trainable_statistics = True
        self.use_global_stats = True
if __name__ == '__main__':
    # Switch to static-graph mode before running the suite; individual tests
    # enter dygraph mode explicitly via fluid.dygraph.guard where needed.
    paddle.enable_static()
    unittest.main()
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录