BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit a2dfb104

Authored Sep 13, 2016 by emailweixu; committed via GitHub on Sep 13, 2016

Merge pull request #2 from baidu/master

Update from the original

Parents: 5c6ecb27, 7dbc092c
Showing 4 changed files with 11 additions and 29 deletions (+11 −29)

paddle/cuda/src/hl_cuda_cudnn.cc                 +7   −9
paddle/gserver/layers/CudnnBatchNormLayer.cpp    +0   −18
python/paddle/trainer/config_parser.py           +1   −1
python/paddle/trainer_config_helpers/layers.py   +3   −1
paddle/cuda/src/hl_cuda_cudnn.cc

@@ -150,7 +150,7 @@ CUDNN_DNN_ROUTINE_EACH_AFTER_R3(DYNAMIC_LOAD_CUDNN_WRAP)
 // APIs available after R4:
-#if CUDNN_VERSION >= 4000
+#if CUDNN_VERSION >= 4007
 #define CUDNN_DNN_ROUTINE_EACH_AFTER_R4(__macro)   \
   __macro(cudnnBatchNormalizationForwardTraining)  \
   __macro(cudnnBatchNormalizationForwardInference) \

@@ -999,7 +999,7 @@ void hl_batch_norm_forward_training(hl_tensor_descriptor inputDesc,
                                     double epsilon,
                                     real* savedMean,
                                     real* savedVar) {
-#if CUDNN_VERSION >= 4000
+#if CUDNN_VERSION >= 4007
   if ((NULL != runningMean && NULL == runningInvVar) ||
       (NULL == runningMean && NULL != runningInvVar)) {
     LOG(FATAL) << "runningMean and runningInvVar can be NULL "

@@ -1024,7 +1024,7 @@ void hl_batch_norm_forward_training(hl_tensor_descriptor inputDesc,
   CHECK_SYNC("hl_batch_norm_forward_training failed");
 #else
-  LOG(FATAL) << "CudnnBatchNorm requires cudnn version >= 4000. "
+  LOG(FATAL) << "CudnnBatchNorm requires cudnn version >= 4007. "
              << "But cudnn lib version is " << g_cudnn_lib_version;
 #endif
 }

@@ -1039,7 +1039,7 @@ void hl_batch_norm_forward_inference(hl_tensor_descriptor inputDesc,
                                      real* estimatedMean,
                                      real* estimatedInvVar,
                                      double epsilon) {
-#if CUDNN_VERSION >= 4000
+#if CUDNN_VERSION >= 4007
   cudnnTensorDescriptor_t xDesc  = GET_TENSOR_DESCRIPTOR(inputDesc);
   cudnnTensorDescriptor_t yDesc  = GET_TENSOR_DESCRIPTOR(outputDesc);
   cudnnTensorDescriptor_t bnDesc = GET_TENSOR_DESCRIPTOR(bnParamDesc);

@@ -1053,7 +1053,7 @@ void hl_batch_norm_forward_inference(hl_tensor_descriptor inputDesc,
   CHECK_SYNC("hl_batch_norm_forward_inference failed");
 #else
-  LOG(FATAL) << "CudnnBatchNorm requires cudnn version >= 4000. "
+  LOG(FATAL) << "CudnnBatchNorm requires cudnn version >= 4007. "
              << "But cudnn lib version is " << g_cudnn_lib_version;
 #endif
 }

@@ -1071,7 +1071,7 @@ void hl_batch_norm_backward(hl_tensor_descriptor inputDesc,
                             double epsilon,
                             real* savedMean,
                             real* savedInvVar) {
-#if CUDNN_VERSION >= 4000
+#if CUDNN_VERSION >= 4007
   if ((NULL != savedMean && NULL == savedInvVar) ||
       (NULL == savedMean && NULL != savedInvVar)) {
     LOG(FATAL) << "savedMean and savedVar can be NULL "

@@ -1087,16 +1087,14 @@ void hl_batch_norm_backward(hl_tensor_descriptor inputDesc,
   cudnnBatchNormMode_t mode = CUDNN_BATCHNORM_SPATIAL;
   CHECK_CUDNN(dynload::cudnnBatchNormalizationBackward(
       t_resource.cudnn_handle, mode, &alpha, &beta,
 #if CUDNN_VERSION >= 5000
       &alpha, &beta,
 #endif
       xDesc, input, dyDesc, outGrad, dxDesc, inGrad,
       bnDesc, scale, scaleGrad, biasGrad, epsilon,
       savedMean, savedInvVar));
   CHECK_SYNC("hl_batch_norm_backward failed");
 #else
-  LOG(FATAL) << "CudnnBatchNorm requires cudnn version >= 4000. "
+  LOG(FATAL) << "CudnnBatchNorm requires cudnn version >= 4007. "
              << "But cudnn lib version is " << g_cudnn_lib_version;
 #endif
 }
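The guard these hunks tighten follows a common pattern: the batch-norm wrappers are compiled in only when the cuDNN headers report at least version 4007, and the fallback path fails at run time with a fatal log that includes the loaded library's version. The standalone sketch below illustrates that pattern; it is not the repository's code, the fallback CUDNN_VERSION define exists only so the example builds without cuDNN installed, and fatal() is a hypothetical stand-in for LOG(FATAL).

#include <cstdio>
#include <cstdlib>

// Normally supplied by cudnn.h (e.g. 4007 for cuDNN R4 patch 7). Defined here
// only so this sketch compiles without cuDNN installed -- an assumption.
#ifndef CUDNN_VERSION
#define CUDNN_VERSION 4007
#endif

// Hypothetical stand-in for LOG(FATAL): report the runtime library version and abort.
static void fatal(const char* msg, int runtime_version) {
  std::fprintf(stderr, "%s But cudnn lib version is %d\n", msg, runtime_version);
  std::abort();
}

void batch_norm_forward_training_guarded(int runtime_cudnn_version) {
#if CUDNN_VERSION >= 4007
  // Real code would call cudnnBatchNormalizationForwardTraining(...) here.
  std::printf("batch-norm path compiled in (headers >= 4007)\n");
  (void)runtime_cudnn_version;
#else
  // Built against headers that are too old: refuse at run time, like the #else branches above.
  fatal("CudnnBatchNorm requires cudnn version >= 4007.", runtime_cudnn_version);
#endif
}

int main() {
  batch_norm_forward_training_guarded(/*runtime_cudnn_version=*/4007);
  return 0;
}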
paddle/gserver/layers/CudnnBatchNormLayer.cpp

@@ -115,29 +115,11 @@ void CudnnBatchNormLayer::backward(const UpdateCallback& callback) {
     create(tmpBiasGrad_, 1, channels_, &betaGrad);
   }

-  // because of the different api of cudnn v4 and v5.
-  if (hl_get_cudnn_lib_version() < 5000) {
-    if (weight_->getWGrad()) {
-      create(tmpWGrad_, 1, channels_, &gammaGrad);
-    }
-    if (biases_ && biases_->getWGrad()) {
-      create(tmpBiasGrad_, 1, channels_, &betaGrad);
-    }
-  }
-
   hl_batch_norm_backward(ioDesc_, input, ioDesc_, outGrad, ioDesc_, inGrad,
                          bnParamDesc_, gamma, gammaGrad, betaGrad, EPS,
                          savedMean, savedInvVar);

-  // because of the different api of cudnn v4 and v5.
-  if (hl_get_cudnn_lib_version() < 5000) {
-    if (weight_->getWGrad() && biases_->getWGrad()) {
-      weight_->getWGrad()->add(*tmpWGrad_);
-      biases_->getWGrad()->add(*tmpBiasGrad_);
-    }
-  }
-
   {
     REGISTER_TIMER_INFO("WeightUpdate", getName().c_str());
     biases_->getParameterPtr()->incUpdate(callback);
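The two deleted branches routed the parameter gradients through the temporaries tmpWGrad_ and tmpBiasGrad_ and added them back after hl_batch_norm_backward, apparently because the pre-v5 cuDNN backward call (which lacks the extra alpha/beta blend factors guarded by #if CUDNN_VERSION >= 5000 in the previous file) overwrites its gradient outputs rather than accumulating into them. Below is a minimal, self-contained sketch of that route-through-a-temporary idiom; every name in it is hypothetical, and the overwrite-versus-accumulate behaviour is an assumption drawn from the deleted comment, not from cuDNN documentation.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for a backward call that OVERWRITES its gradient
// outputs (the pre-v5 behaviour the deleted branches appear to work around).
static void backward_overwrite(std::vector<float>& gammaGrad,
                               std::vector<float>& betaGrad) {
  for (float& g : gammaGrad) g = 1.0f;  // pretend these are freshly computed gradients
  for (float& b : betaGrad) b = 2.0f;
}

int main() {
  const int channels = 4;
  std::vector<float> gammaGrad(channels, 0.5f);  // gradients accumulated so far
  std::vector<float> betaGrad(channels, 0.5f);

  const bool cudnn_older_than_v5 = true;  // assumption made for this sketch
  if (cudnn_older_than_v5) {
    // Route the overwriting call through temporaries, then add them back,
    // so the earlier accumulation is not lost -- the idiom the commit deletes.
    std::vector<float> tmpGammaGrad(channels, 0.0f), tmpBetaGrad(channels, 0.0f);
    backward_overwrite(tmpGammaGrad, tmpBetaGrad);
    for (int i = 0; i < channels; ++i) {
      gammaGrad[i] += tmpGammaGrad[i];
      betaGrad[i] += tmpBetaGrad[i];
    }
  } else {
    // A backward call that blends with alpha/beta could accumulate in place instead.
    backward_overwrite(gammaGrad, betaGrad);
  }

  std::printf("gammaGrad[0] = %.1f, betaGrad[0] = %.1f\n", gammaGrad[0], betaGrad[0]);
  return 0;
}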
python/paddle/trainer/config_parser.py

@@ -1614,7 +1614,7 @@ class BatchNormLayer(LayerBase):
         # Also based on cudnn version.
         use_cudnn = use_gpu and batch_norm_type != "batch_norm" and \
             ((not parallel_nn) or self.config.device > -1) and \
-            cudnn_version >= 4000
+            cudnn_version >= 4007
         self.layer_type = "cudnn_batch_norm" if use_cudnn else "batch_norm"
         super(BatchNormLayer, self).__init__(name, self.layer_type, 0,
                                              active_type=active_type,
python/paddle/trainer_config_helpers/layers.py

@@ -171,6 +171,8 @@ class LayerOutput(object):
         assert LayerType.is_layer_type(layer_type)
         self.name = name
         self.layer_type = layer_type
+        if parents is not None and type(parents) != list:
+            parents = [parents]
         self.parents = [] if parents is None else parents
         self.activation = activation
         self.num_filters = num_filters

@@ -512,7 +514,7 @@ class MixedLayerType(LayerOutput):
         :rtype: MixedLayerType
         """
         if not self.finalized:
-            assert isinstance(other, Projection)
+            assert isinstance(other, Projection) or isinstance(other, Operator)
             self.inputs.append(other)
             self.parents.append(other.origin)
             return self