Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the upstream project)
Commit 3233b2b3
Authored on Mar 18, 2018 by Kexin Zhao
update test

Parent: 5e36757c
1 changed file
Showing 1 changed file with 93 additions and 76 deletions:
python/paddle/fluid/tests/unittests/test_batch_norm_op.py (+93, -76)
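The two inference test classes touched by this commit can be run on their own with the standard unittest loader. A minimal sketch, assuming a local Paddle build whose python/paddle/fluid/tests/unittests directory is on PYTHONPATH and whose fluid package is importable (the loader calls are plain stdlib; only the environment setup is assumed):

    import unittest

    # Import the modified test module and load just the two classes this commit touches.
    import test_batch_norm_op

    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite()
    suite.addTests(
        loader.loadTestsFromTestCase(test_batch_norm_op.TestBatchNormOpInference))
    suite.addTests(
        loader.loadTestsFromTestCase(test_batch_norm_op.TestFP16BatchNormOpInference))
    unittest.TextTestRunner(verbosity=2).run(suite)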
python/paddle/fluid/tests/unittests/test_batch_norm_op.py
@@ -187,99 +187,116 @@ def set_output_grad(scope, outputs, place, feed_dict=None):
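Read against the removed lines visible in the diff, this commit appears to rename the nested test_with_place helper (previously driven by test_inference, which looped over np.float32 and np.float16 explicitly) to check_with_place, drive it from a test_check_output method that uses the per-class self.dtype set in setUp, and give TestFP16BatchNormOpInference its own test_check_output that only registers CUDAPlace(0) when core.is_float16_supported(place) holds. The updated hunk: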
class TestBatchNormOpInference(OpTest):
    def setUp(self):
        self.dtype = np.float32

    def __assert_close(self, tensor, np_array, msg, atol=1e-4):
        self.assertTrue(
            np.allclose(
                np.array(tensor), np_array, atol=atol), msg)

    def test_check_output(self):
        def check_with_place(place, data_layout, dtype, shape):
            epsilon = 0.00001
            if len(shape) == 2:
                x_shape = shape
                c = x_shape[1]
            else:
                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
                if data_layout == "NHWC":
                    x_shape = [n, h, w, c]
                elif data_layout == "NCHW":
                    x_shape = [n, c, h, w]
                else:
                    raise ValueError("Unknown data layout.")
            scale_shape = [c]

            x_val = np.random.random_sample(x_shape).astype(dtype)
            scale_val = np.random.random_sample(scale_shape).astype(dtype)
            bias_val = np.random.random_sample(scale_shape).astype(dtype)

            mean = np.zeros(scale_shape).astype(dtype)
            variance = np.ones(scale_shape).astype(dtype)

            y_out = _reference_testing(x_val, scale_val, bias_val, mean,
                                       variance, epsilon,
                                       data_layout).astype(dtype)

            scope = core.Scope()

            # create input
            x_tensor = create_or_get_tensor(
                scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place)
            scale_tensor = create_or_get_tensor(
                scope, "scale_val", OpTest.np_dtype_to_fluid_dtype(scale_val),
                place)
            bias_tensor = create_or_get_tensor(
                scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val),
                place)
            mean_tensor = create_or_get_tensor(
                scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place)
            variance_tensor = create_or_get_tensor(
                scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance),
                place)

            # create output
            y_tensor = create_or_get_tensor(scope, "y_out", None, place)
            saved_mean_tensor = create_or_get_tensor(scope, "saved_mean",
                                                     None, place)
            saved_variance_tensor = create_or_get_tensor(
                scope, "saved_variance", None, place)
            mean_out_tensor = mean_tensor
            variance_out_tensor = variance_tensor

            batch_norm_op = Operator(
                "batch_norm",
                # inputs
                X="x_val",
                Scale="scale_val",
                Bias="bias_val",
                Mean="mean",
                Variance="variance",
                # outputs
                Y="y_out",
                MeanOut="mean",
                VarianceOut="variance",
                SavedMean="saved_mean",
                SavedVariance="saved_variance",
                # attrs
                is_test=True,
                data_layout=data_layout,
                epsilon=epsilon)

            batch_norm_op.run(scope, place)

            # check inference result
            self.__assert_close(y_tensor, y_out,
                                "inference output are different at " +
                                str(place) + ", " + data_layout + ", " +
                                str(np.dtype(dtype)))

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            for data_format in ["NCHW", "NHWC"]:
                check_with_place(place, data_format, self.dtype, [2, 3, 4, 5])
                check_with_place(place, data_format, self.dtype, [2, 3])


class TestFP16BatchNormOpInference(TestBatchNormOpInference):
    def setUp(self):
        self.dtype = np.float16

    def test_check_output(self):
        places = []
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                places.append(place)

        for place in places:
            for data_format in ["NCHW", "NHWC"]:
                check_output_with_place(place, data_format, self.dtype,
                                        [2, 3, 4, 5])
                check_output_with_place(place, data_format, self.dtype, [2, 3])


class TestBatchNormOpTraining(OpTest):
    ...
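The assertion in check_with_place compares the operator output against _reference_testing, a NumPy reference defined earlier in this file and outside the hunk, so its body is not shown in the diff. For orientation only, a minimal sketch of what a batch-norm inference reference with this call signature typically computes; the function name and body below are illustrative assumptions, not the file's actual helper:

    import numpy as np

    def reference_batch_norm_inference(x, scale, bias, mean, var, epsilon,
                                       data_layout):
        # Inference-mode batch norm: normalize with the running statistics,
        # then apply the per-channel affine transform:
        #   y = scale * (x - mean) / sqrt(var + epsilon) + bias
        # scale/bias/mean/var are 1-D arrays of length C.
        if x.ndim == 2 or data_layout == "NHWC":
            # Channels are the trailing axis, so 1-D parameters broadcast directly.
            x_hat = (x - mean) / np.sqrt(var + epsilon)
            return scale * x_hat + bias
        elif data_layout == "NCHW":
            # Reshape parameters to (1, C, 1, 1) so they broadcast over N, H, W.
            c = x.shape[1]
            param_shape = (1, c, 1, 1)
            x_hat = (x - mean.reshape(param_shape)) / np.sqrt(
                var.reshape(param_shape) + epsilon)
            return scale.reshape(param_shape) * x_hat + bias.reshape(param_shape)
        else:
            raise ValueError("Unknown data layout.")

With mean initialized to zeros and variance to ones, as in the test above, this reduces to scale * x / sqrt(1 + epsilon) + bias, which is what the batch_norm operator should reproduce in is_test mode for both the NCHW and NHWC layouts the test exercises.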