Repository: 机器未来 / Paddle (fork of PaddlePaddle / Paddle)

Commit 0a95a44b
Authored on Mar 17, 2018 by Kexin Zhao
Parent: 39c676e2

add python batch norm inference test
Showing 2 changed files with 70 additions and 3 deletions (+70 -3):

paddle/fluid/operators/batch_norm_op.cu.cc                    +2 -2
python/paddle/fluid/tests/unittests/test_batch_norm_op.py     +68 -1
paddle/fluid/operators/batch_norm_op.cu.cc

...
@@ -125,8 +125,8 @@ class BatchNormKernel<platform::CUDADeviceContext, T>
     auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     math::SetConstant<platform::CUDADeviceContext, T> functor;
-    functor(dev_ctx, saved_mean, 0);
-    functor(dev_ctx, saved_variance, 0);
+    functor(dev_ctx, saved_mean, static_cast<T>(0));
+    functor(dev_ctx, saved_variance, static_cast<T>(0));
     auto handle = dev_ctx.cudnn_handle();
...
python/paddle/fluid/tests/unittests/test_batch_norm_op.py

...
@@ -31,6 +31,37 @@ def get_backward_op(scope, op, no_grad_set):
     return backward_op


+def _reference_testing(x, scale, offset, mean, var, epsilon, data_format):
+    x_shape = x.shape
+    if len(x_shape) == 2:
+        if data_format == "NCHW":
+            x = np.reshape(x, (x.shape[0], x.shape[1], 1, 1))
+        else:
+            x = np.reshape(x, (x.shape[0], 1, 1, x.shape[1]))
+
+    if data_format == "NCHW":
+        n, c, h, w = x.shape
+        mean_tile = np.reshape(mean, (1, c, 1, 1))
+        mean_tile = np.tile(mean_tile, (n, 1, h, w))
+        var_tile = np.reshape(var, (1, c, 1, 1))
+        var_tile = np.tile(var_tile, (n, 1, h, w))
+        normalized = (x - mean_tile) / np.sqrt(var_tile + epsilon)
+        scale_tile = np.reshape(scale, (1, c, 1, 1))
+        scale_tile = np.tile(scale_tile, (n, 1, h, w))
+        offset_tile = np.reshape(offset, (1, c, 1, 1))
+        offset_tile = np.reshape(offset_tile, (1, c, 1, 1))
+        y = normalized * scale_tile + offset_tile
+    elif data_format == "NHWC":
+        normalized = (x - mean) / np.sqrt(var + epsilon)
+        y = normalized * scale + offset
+    else:
+        raise ValueError("Unknown data order.")
+
+    if len(x_shape) == 2:
+        y = np.reshape(y, x_shape)
+    return y
+
+
 def _reference_training(x, scale, offset, epsilon, data_format):
     x_shape = x.shape
     if len(x_shape) == 2:
...
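For reference, the inference path computed by _reference_testing is y = scale * (x - mean) / sqrt(var + epsilon) + offset, applied per channel with the running statistics held fixed. A minimal NumPy sketch of the NCHW branch that relies on broadcasting instead of the explicit np.tile calls above (the helper name is illustrative and not part of the test file):

import numpy as np


def batch_norm_inference_nchw(x, scale, offset, mean, var, epsilon):
    # x: (N, C, H, W); scale, offset, mean, var: (C,).
    # Reshape per-channel statistics to (1, C, 1, 1) and let NumPy
    # broadcasting replace the explicit tiling used in _reference_testing.
    shape = (1, -1, 1, 1)
    normalized = (x - mean.reshape(shape)) / np.sqrt(var.reshape(shape) + epsilon)
    return normalized * scale.reshape(shape) + offset.reshape(shape)

With mean = 0 and var = 1, as in the test added below, this reduces to y = scale * x / sqrt(1 + epsilon) + offset.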
@@ -155,7 +186,43 @@ def set_output_grad(scope, outputs, place, feed_dict=None):
         __set_tensor__(output, data)


-class TestBatchNormOp(OpTest):
+class TestBatchNormOpInference(OpTest):
+    def setUp(self):
+        self.dtype = np.float32
+
+    def test_python(self):
+        data_format = "NHWC"
+        epsilon = 0.00001
+
+        n, h, w, c = 2, 3, 4, 5
+        x_shape = [n, h, w, c]
+        scale_shape = [c]
+
+        x_val = np.random.random_sample(x_shape).astype(self.dtype)
+        scale_val = np.random.random_sample(scale_shape).astype(self.dtype)
+        bias_val = np.random.random_sample(scale_shape).astype(self.dtype)
+
+        mean = np.zeros(scale_shape).astype(self.dtype)
+        variance = np.ones(scale_shape).astype(self.dtype)
+
+        # run forward
+        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
+                                   epsilon, "NHWC")
+
+        # running N, C, H, W case
+        # should produce the same results
+        x_shape2 = [n, c, h, w]
+        x_val2 = np.transpose(x_val, (0, 3, 1, 2))
+        y_out2 = _reference_testing(x_val2, scale_val, bias_val, mean,
+                                    variance, epsilon, "NCHW")
+
+        # transfer (N, C, H, W) back to (N, H, W, C)
+        y_out2_trans = np.transpose(y_out2, (0, 2, 3, 1))
+        self.__assert_close(y_out, y_out2_trans, "inference output")
+        print 'python: NHWC, NCHW, inference checking passed'
+
+
+class TestBatchNormOpTraining(OpTest):
     def __assert_close(self, tensor, np_array, msg, atol=1e-4):
         self.assertTrue(
             np.allclose(np.array(tensor), np_array, atol=atol), msg)
...
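The new TestBatchNormOpInference.test_python exercises only the NumPy reference: it evaluates the same random input in NHWC and NCHW layouts and checks that the results agree once the NCHW output is transposed back. A minimal standalone sketch of that layout round-trip (plain NumPy; the variable names are illustrative, not taken from the test file):

import numpy as np

# Shapes mirror test_python above.
n, h, w, c = 2, 3, 4, 5
eps = 1e-5
x_nhwc = np.random.random_sample((n, h, w, c)).astype(np.float32)
scale = np.random.random_sample((c,)).astype(np.float32)
bias = np.random.random_sample((c,)).astype(np.float32)
mean = np.zeros((c,), dtype=np.float32)
var = np.ones((c,), dtype=np.float32)

# NHWC: per-channel statistics broadcast over the trailing channel axis.
y_nhwc = (x_nhwc - mean) / np.sqrt(var + eps) * scale + bias

# NCHW: transpose, normalize with (1, C, 1, 1) statistics, transpose back.
x_nchw = np.transpose(x_nhwc, (0, 3, 1, 2))
stat = (1, c, 1, 1)
y_nchw = ((x_nchw - mean.reshape(stat)) / np.sqrt(var.reshape(stat) + eps) *
          scale.reshape(stat) + bias.reshape(stat))
y_back = np.transpose(y_nchw, (0, 2, 3, 1))

assert np.allclose(y_nhwc, y_back, atol=1e-4)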