PaddlePaddle / PaddleDetection
Commit b8afb140
Authored Feb 22, 2017 by wangyang59

cudnn deconv implementation

Parent: 5a933b44

Showing 15 changed files with 789 additions and 389 deletions (+789, −389)
paddle/gserver/CMakeLists.txt                    +4    −0
paddle/gserver/layers/ConvBaseProjection.cpp     +204  −0
paddle/gserver/layers/ConvBaseProjection.h       +162  −0
paddle/gserver/layers/ConvProjection.cpp         +3    −170
paddle/gserver/layers/ConvProjection.h           +5    −94
paddle/gserver/layers/ConvTransProjection.cpp    +95   −0
paddle/gserver/layers/ConvTransProjection.h      +41   −0
paddle/gserver/layers/CudnnConvBaseLayer.cpp     +121  −0
paddle/gserver/layers/CudnnConvBaseLayer.h       +53   −0
paddle/gserver/layers/CudnnConvLayer.cpp         +0    −93
paddle/gserver/layers/CudnnConvLayer.h           +5    −20
paddle/gserver/layers/CudnnConvTransLayer.cpp    +23   −0
paddle/gserver/layers/CudnnConvTransLayer.h      +41   −0
paddle/gserver/tests/test_ConvUnify.cpp          +1    −2
paddle/gserver/tests/test_LayerGrad.cpp          +31   −10
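Taken together, the diff factors the cuDNN-specific convolution machinery into shared base classes: a new ConvBaseProjection holds the descriptor setup, algorithm selection, and workspace management that previously lived in ConvProjection, and a new ConvTransProjection reuses all of it for transposed (deconv) convolution by running the cuDNN backward-data kernel in its forward pass. The same pattern is applied at the layer level with CudnnConvBaseLayer, a slimmed-down CudnnConvLayer, and a new CudnnConvTransLayer registered as cudnn_convt; the gradient tests are extended to exercise both directions.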
paddle/gserver/CMakeLists.txt

...
@@ -25,12 +25,16 @@ filter_test(GSERVER_HEADER)
 filter_test(GSERVER_SOURCES)
 if(NOT WITH_GPU)
     list(REMOVE_ITEM GSERVER_HEADER
+        layers/CudnnConvBaseLayer.h
         layers/CudnnConvLayer.h
+        layers/CudnnConvTransLayer.h
         layers/CudnnPoolLayer.h
         layers/CudnnBatchNormLayer.h)
     list(REMOVE_ITEM GSERVER_SOURCES
+        layers/CudnnConvBaseLayer.cpp
         layers/CudnnConvLayer.cpp
+        layers/CudnnConvTransLayer.cpp
         layers/CudnnPoolLayer.cpp
         layers/CudnnBatchNormLayer.cpp)
     compile_cu_as_cpp(layers/LstmCompute.cu)
...
paddle/gserver/layers/ConvBaseProjection.cpp (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ConvBaseProjection.h"
#include "paddle/utils/Stat.h"
namespace paddle {

ThreadLocalD<std::vector<MemoryHandle *>> ConvBaseProjection::convMem_;

ConvBaseProjection::ConvBaseProjection(const ProjectionConfig &config,
                                       ParameterPtr parameter,
                                       bool useGpu)
    : Projection(config, parameter, useGpu) {
  CHECK(useGpu);  // only support GPU
  getConvParams();
  initCudnn();

  size_t height = filterH_ * filterW_ * channels_ / groups_;
  size_t width = numFilters_;
  weight_.reset(new Weight(height, width, parameter));
  weightOffset_ = height * width / groups_;
}

void ConvBaseProjection::getConvParams() {
  const ConvConfig &conf = config_.conv_conf();
  paddingH_ = conf.padding_y();
  paddingW_ = conf.padding();
  strideH_ = conf.stride_y();
  strideW_ = conf.stride();
  filterH_ = conf.filter_size_y();
  filterW_ = conf.filter_size();

  configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
  configImgW_ = conf.img_size();
  configOutH_ = conf.has_output_y() ? conf.output_y() : conf.output_x();
  configOutW_ = conf.output_x();

  configChannels_ = conf.channels();
  configNumFilters_ = config_.num_filters();

  isDeconv_ = (config_.type() == "conv") ? false : true;
  channels_ = (isDeconv_) ? configNumFilters_ : configChannels_;
  numFilters_ = (isDeconv_) ? configChannels_ : configNumFilters_;

  groups_ = conf.groups();
  CHECK_EQ(channels_ % groups_, 0);
  CHECK_EQ(numFilters_ % groups_, 0);
}

void ConvBaseProjection::initCudnn() {
  hl_create_filter_descriptor(&filterDesc_, channels_ / groups_,
                              numFilters_ / groups_, filterH_, filterW_);
  hl_create_tensor_descriptor(&imageDesc_);
  hl_create_tensor_descriptor(&outputDesc_);
  hl_create_convolution_descriptor(&convDesc_, imageDesc_, filterDesc_,
                                   paddingH_, paddingW_, strideH_, strideW_);

  // initialize all to default algorithms
  fwdAlgo_ = 0;
  bwdFilterAlgo_ = 0;
  bwdDataAlgo_ = 0;
  fwdLimitBytes_ = 0;
  bwdDataLimitBytes_ = 0;
  bwdFilterLimitBytes_ = 0;
  workSpaceInBytes_ = 0;

  batchNum_ = 0;
  isSelectAlgo_ = false;
}

void ConvBaseProjection::reshapeTensorDesc(int batchSize) {
  hl_tensor_reshape(imageDesc_,
                    batchSize,
                    channels_ / groups_,
                    imageH_,
                    imageW_,
                    channels_ * imageH_ * imageW_,
                    imageH_ * imageW_,
                    imageW_,
                    1);
  hl_reset_convolution_descriptor(convDesc_, imageDesc_, filterDesc_,
                                  paddingH_, paddingW_, strideH_, strideW_);

  // The stride between two consecutive images in ConvProjection may not be 1,
  // for example, in the case of layer ConcatenateLayer2 with two
  // ConvProjection, the stride is the output_size of layer ConcatenateLayer2.
  // So the calculation of nStride is different from CudnnConvLayer.
  // In fact, only "nStride = out_->value->getStride()" is ok.
  // size_t nStride = numFilters_ * outputH_ * outputW_;
  // if (out_->value->isContiguous()) {
  //   CHECK_EQ(nStride, out_->value->getWidth());
  // } else {
  //   nStride = out_->value->getStride();
  // }
  size_t nStride = out_->value->getStride();

  hl_tensor_reshape(outputDesc_,
                    batchSize,
                    numFilters_ / groups_,
                    outputH_,
                    outputW_,
                    nStride,
                    outputH_ * outputW_,
                    outputW_,
                    1);
}

void ConvBaseProjection::reshape(int batchSize) {
  size_t width = calOutputSize();
  CHECK_EQ(width, out_->value->getWidth());
  if (isDeconv_) {
    CHECK_EQ(static_cast<size_t>(configChannels_ * outputH_ * outputW_),
             in_->value->getWidth())
        << "Wrong input size for convolution transpose"
        << " channels=" << configChannels_ << " outputH=" << outputH_
        << " outputW=" << outputW_ << " inputSize=" << in_->value->getWidth();
  } else {
    CHECK_EQ(static_cast<size_t>(configChannels_ * imageH_ * imageW_),
             in_->value->getWidth())
        << "Wrong input size for convolution"
        << " channels=" << configChannels_ << " imageH=" << imageH_
        << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth();
  }

  isSelectAlgo_ = (batchSize == batchNum_);
  batchNum_ = batchSize;

  if (!isSelectAlgo_) {
    reshapeTensorDesc(batchSize);
    hl_conv_workspace(imageDesc_,
                      outputDesc_,
                      filterDesc_,
                      convDesc_,
                      &fwdAlgo_,
                      &fwdLimitBytes_,
                      &bwdDataAlgo_,
                      &bwdDataLimitBytes_,
                      &bwdFilterAlgo_,
                      &bwdFilterLimitBytes_);

    size_t maxWorkSpace = 0;
    maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_);
    maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_);
    workSpaceInBytes_ = maxWorkSpace;

    VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_
            << " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_;
  }

  isSelectAlgo_ = true;
}

void *ConvBaseProjection::getSpaceBytes(size_t size) {
  std::vector<MemoryHandle *> &convMem = *convMem_;
  if (convMem.empty()) {
    int numDevices = hl_get_device_count();
    convMem.resize(numDevices);
  }

  int devId = hl_get_device();
  MemoryHandle **localMem = &(convMem[devId]);
  if (NULL == *localMem || size > (*localMem)->getAllocSize()) {
    *localMem = new GpuMemoryHandle(size);
  }
  return (*localMem)->getBuf();
}

ConvBaseProjection::~ConvBaseProjection() {
  hl_destroy_tensor_descriptor(imageDesc_);
  hl_destroy_tensor_descriptor(outputDesc_);
  hl_destroy_filter_descriptor(filterDesc_);
  hl_destroy_convolution_descriptor(convDesc_);
}

}  // namespace paddle
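The nStride comment above is the subtle part of reshapeTensorDesc: when several projections write into one concatenated output matrix, image n of a given projection does not start numFilters_ * outputH_ * outputW_ elements after image n-1, but one full output-matrix row later. A minimal sketch of the strided NCHW addressing this sets up (hypothetical types, not the hl_* implementation):

#include <cstddef>

// Hypothetical stand-in for the descriptor state that hl_tensor_reshape
// fills in: a logical NCHW shape plus an explicit stride per dimension.
struct StridedTensorDesc {
  size_t n, c, h, w;                          // logical shape
  size_t nStride, cStride, hStride, wStride;  // strides in elements
};

// Linear buffer offset of element (n, c, y, x). With nStride set to the
// output matrix's row stride, two projections can interleave their outputs
// in one matrix without any copying.
size_t elementOffset(const StridedTensorDesc &d,
                     size_t n, size_t c, size_t y, size_t x) {
  return n * d.nStride + c * d.cStride + y * d.hStride + x * d.wStride;
}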
paddle/gserver/layers/ConvBaseProjection.h (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "Projection.h"
#include "paddle/math/MathUtils.h"
namespace paddle {

/**
 * @brief Base class for ConvProjection and ConvTransProjection.
 */
class ConvBaseProjection : public Projection {
public:
  /**
   * Constructor.
   */
  ConvBaseProjection(const ProjectionConfig &config,
                     ParameterPtr parameter,
                     bool useGpu);

  ~ConvBaseProjection();

protected:
  void getConvParams();
  void initCudnn();

  void reshapeTensorDesc(int batchSize);
  void reshape(int batchSize);

  size_t calOutputSize() {
    if (isDeconv_) {
      outputH_ = in_->getFrameHeight();
      outputW_ = in_->getFrameWidth();
      if (outputH_ == 0) outputH_ = configOutH_;
      if (outputW_ == 0) outputW_ = configOutW_;
      imageH_ = imageSize(outputH_, filterH_, paddingH_, strideH_,
                          /* caffeMode */ true);
      imageW_ = imageSize(outputW_, filterW_, paddingW_, strideW_,
                          /* caffeMode */ true);

      const_cast<Argument *>(out_)->setFrameHeight(imageH_);
      const_cast<Argument *>(out_)->setFrameWidth(imageW_);

      inputOffset_ = (configChannels_ / groups_) * outputH_ * outputW_;
      outputOffset_ = (configNumFilters_ / groups_) * imageH_ * imageW_;
      return imageH_ * imageW_ * configNumFilters_;
    } else {
      imageH_ = in_->getFrameHeight();
      imageW_ = in_->getFrameWidth();
      if (imageH_ == 0) imageH_ = configImgH_;
      if (imageW_ == 0) imageW_ = configImgW_;
      outputH_ = outputSize(imageH_, filterH_, paddingH_, strideH_,
                            /* caffeMode */ true);
      outputW_ = outputSize(imageW_, filterW_, paddingW_, strideW_,
                            /* caffeMode */ true);

      const_cast<Argument *>(out_)->setFrameHeight(outputH_);
      const_cast<Argument *>(out_)->setFrameWidth(outputW_);

      inputOffset_ = (configChannels_ / groups_) * imageH_ * imageW_;
      outputOffset_ = (configNumFilters_ / groups_) * outputH_ * outputW_;
      return outputH_ * outputW_ * configNumFilters_;
    }
  }

  static void *getSpaceBytes(size_t size);

  /// True if this is a deconv projection, false if it is a ConvProjection.
  bool isDeconv_;
  /// imageH_ and imageW_ / outputH_ and outputW_
  /// are calculated from the input layer.
  int imageH_, imageW_;
  int outputH_, outputW_;
  /// configImgH_ and configImgW_ / configOutH_ and configOutW_
  /// are obtained from the config.
  int configImgH_, configImgW_;
  int configOutH_, configOutW_;
  /// channels_ and numFilters_ are defined in terms of convolution semantics.
  int channels_, numFilters_;
  /// configChannels_ and configNumFilters_ are obtained from the config.
  /// For Conv they are the same as channels_ and numFilters_;
  /// for ConvTrans they are swapped relative to channels_ and numFilters_.
  int configChannels_, configNumFilters_;
  int paddingH_, paddingW_;
  int strideH_, strideW_;
  int filterH_, filterW_;
  /// One group offset of input data.
  int inputOffset_;
  /// One group offset of output data.
  int outputOffset_;
  /// One group offset of weight.
  int weightOffset_;
  int groups_;

  /// Cudnn tensor descriptor for input.
  hl_tensor_descriptor imageDesc_;
  /// Cudnn tensor descriptor for output.
  hl_tensor_descriptor outputDesc_;
  /// Cudnn tensor descriptor for filter.
  hl_filter_descriptor filterDesc_;
  /// Cudnn tensor descriptor for a convolution operation.
  hl_convolution_descriptor convDesc_;

  /// Record the algorithm for forward convolution, which is obtained by cudnn
  /// api to search the best suited algorithm.
  int fwdAlgo_;
  /// Record the algorithm for computing convolution gradient with respect to
  /// filter coefficients.
  int bwdFilterAlgo_;
  /// Record the algorithm for computing convolution gradient with respect to
  /// the output.
  int bwdDataAlgo_;
  /// Amount of GPU memory needed as workspace to be able to execute a
  /// forward convolution with the specified algo.
  size_t fwdLimitBytes_;
  /// Amount of GPU memory needed as workspace to be able to execute a
  /// backwardData with the specified algo.
  size_t bwdDataLimitBytes_;
  /// Amount of GPU memory needed as workspace to be able to execute a
  /// backwardFilter with the specified algo.
  size_t bwdFilterLimitBytes_;
  /// Size of total work space.
  size_t workSpaceInBytes_;

  /// Whether to call cuDNN api to choose conv algorithm.
  bool isSelectAlgo_;
  /// batchNum is used to record batch size. If the batch size is changed,
  /// the selection algorithm will be called.
  int batchNum_;
  bool bias_;

  std::unique_ptr<Weight> weight_;
  static ThreadLocalD<std::vector<MemoryHandle *>> convMem_;
};

}  // namespace paddle
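The two branches of calOutputSize are inverses of each other: the conv branch maps image extent to output extent, and the deconv branch runs the mapping backwards. As I read the caffeMode helpers in paddle/math/MathUtils.h, per spatial dimension with image extent i, output extent o, filter f, padding p, and stride s:

\[
\mathrm{outputSize}(i,f,p,s)=\left\lfloor\frac{i+2p-f}{s}\right\rfloor+1,
\qquad
\mathrm{imageSize}(o,f,p,s)=(o-1)\,s+f-2p .
\]

So for the deconv branch, the "image" computed from the projection input is exactly the input size whose forward convolution would have produced it.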
paddle/gserver/layers/ConvProjection.cpp

...
@@ -19,151 +19,6 @@ namespace paddle {

 REGISTER_PROJECTION(conv, ConvProjection);

-ThreadLocalD<std::vector<MemoryHandle *>> ConvProjection::convMem_;
-
-ConvProjection::ConvProjection(const ProjectionConfig &config,
-                               ParameterPtr parameter,
-                               bool useGpu)
-    : Projection(config, parameter, useGpu) {
-  CHECK(useGpu);  // only support GPU
-  getConvParams();
-  initCudnn();
-
-  size_t height = filterH_ * filterW_ * channels_ / groups_;
-  size_t width = numFilters_;
-  weight_.reset(new Weight(height, width, parameter));
-  weightOffset_ = height * width / groups_;
-}
-
-void ConvProjection::getConvParams() {
-  const ConvConfig &conf = config_.conv_conf();
-  paddingH_ = conf.padding_y();
-  paddingW_ = conf.padding();
-  strideH_ = conf.stride_y();
-  strideW_ = conf.stride();
-  filterH_ = conf.filter_size_y();
-  filterW_ = conf.filter_size();
-  configImgH_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
-  configImgW_ = conf.img_size();
-  channels_ = conf.channels();
-  numFilters_ = config_.num_filters();
-  groups_ = conf.groups();
-  CHECK_EQ(channels_ % groups_, 0);
-  CHECK_EQ(numFilters_ % groups_, 0);
-}
-
-void ConvProjection::initCudnn() {
-  hl_create_filter_descriptor(&filterDesc_, channels_ / groups_,
-                              numFilters_ / groups_, filterH_, filterW_);
-  hl_create_tensor_descriptor(&inputDesc_);
-  hl_create_tensor_descriptor(&outputDesc_);
-  hl_create_convolution_descriptor(&convDesc_, inputDesc_, filterDesc_,
-                                   paddingH_, paddingW_, strideH_, strideW_);
-
-  // initialize all to default algorithms
-  fwdAlgo_ = 0;
-  bwdFilterAlgo_ = 0;
-  bwdDataAlgo_ = 0;
-  fwdLimitBytes_ = 0;
-  bwdDataLimitBytes_ = 0;
-  bwdFilterLimitBytes_ = 0;
-  workSpaceInBytes_ = 0;
-
-  batchNum_ = 0;
-  isSelectAlgo_ = false;
-}
-
-void ConvProjection::reshapeTensorDesc(int batchSize) {
-  hl_tensor_reshape(inputDesc_, batchSize, channels_ / groups_, imageH_,
-                    imageW_, channels_ * imageH_ * imageW_,
-                    imageH_ * imageW_, imageW_, 1);
-  hl_reset_convolution_descriptor(convDesc_, inputDesc_, filterDesc_,
-                                  paddingH_, paddingW_, strideH_, strideW_);
-
-  // The stride between two consecutive images in ConvProjection may not be 1,
-  // for example, in the case of layer ConcatenateLayer2 with two
-  // ConvProjection, the stride is the output_size of layer ConcatenateLayer2.
-  // So the calculation of nStride is different from CudnnConvLayer.
-  // In fact, only "nStride = out_->value->getStride()" is ok.
-  size_t nStride = numFilters_ * outputH_ * outputW_;
-  if (out_->value->isContiguous()) {
-    CHECK_EQ(nStride, out_->value->getWidth());
-  } else {
-    nStride = out_->value->getStride();
-  }
-
-  hl_tensor_reshape(outputDesc_, batchSize, numFilters_ / groups_, outputH_,
-                    outputW_, nStride, outputH_ * outputW_, outputW_, 1);
-}
-
-void ConvProjection::reshape(int batchSize) {
-  size_t width = calOutputSize();
-  CHECK_EQ(width, out_->value->getWidth());
-  CHECK_EQ(static_cast<size_t>(channels_ * imageH_ * imageW_),
-           in_->value->getWidth())
-      << "Wrong input size for convolution"
-      << " channels=" << channels_ << " imageH=" << imageH_
-      << " imageW=" << imageW_ << " inputSize=" << in_->value->getWidth();
-
-  isSelectAlgo_ = (batchSize == batchNum_);
-  batchNum_ = batchSize;
-
-  if (!isSelectAlgo_) {
-    reshapeTensorDesc(batchSize);
-    hl_conv_workspace(inputDesc_, outputDesc_, filterDesc_, convDesc_,
-                      &fwdAlgo_, &fwdLimitBytes_, &bwdDataAlgo_,
-                      &bwdDataLimitBytes_, &bwdFilterAlgo_,
-                      &bwdFilterLimitBytes_);
-
-    size_t maxWorkSpace = 0;
-    maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_);
-    maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_);
-    workSpaceInBytes_ = maxWorkSpace;
-
-    VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_
-            << " / " << bwdDataAlgo_ << " / " << bwdFilterAlgo_;
-  }
-
-  isSelectAlgo_ = true;
-}
-
 void ConvProjection::forward() {
   int batchSize = in_->value->getHeight();
   reshape(batchSize);
...
@@ -179,7 +34,7 @@ void ConvProjection::forward() {
     real *inputData = in_->value->getData() + g * inputOffset_;
     real *wgtData = weight_->getW()->getData() + g * weightOffset_;
     real *outData = out_->value->getData() + g * outputOffset_;
-    hl_convolution_forward(inputDesc_,
+    hl_convolution_forward(imageDesc_,
                            inputData,
                            outputDesc_,
                            outData,
...
@@ -205,7 +60,7 @@ void ConvProjection::backward(const UpdateCallback &callback) {
     if (weight_->getWGrad()) {
       real *inputData = in_->value->getData() + g * inputOffset_;
       real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_;
-      hl_convolution_backward_filter(inputDesc_,
+      hl_convolution_backward_filter(imageDesc_,
                                      inputData,
                                      outputDesc_,
                                      outGrad,
...
@@ -221,7 +76,7 @@ void ConvProjection::backward(const UpdateCallback &callback) {
     if (NULL != preGrad) {
       real *inputGrad = preGrad->getData() + g * inputOffset_;
       real *wgtData = weight_->getW()->getData() + g * weightOffset_;
-      hl_convolution_backward_data(inputDesc_,
+      hl_convolution_backward_data(imageDesc_,
                                    inputGrad,
                                    outputDesc_,
                                    outGrad,
...
@@ -237,26 +92,4 @@ void ConvProjection::backward(const UpdateCallback &callback) {
   weight_->getParameterPtr()->incUpdate(callback);
 }

-void *ConvProjection::getSpaceBytes(size_t size) {
-  std::vector<MemoryHandle *> &convMem = *convMem_;
-  if (convMem.empty()) {
-    int numDevices = hl_get_device_count();
-    convMem.resize(numDevices);
-  }
-
-  int devId = hl_get_device();
-  MemoryHandle **localMem = &(convMem[devId]);
-  if (NULL == *localMem || size > (*localMem)->getAllocSize()) {
-    *localMem = new GpuMemoryHandle(size);
-  }
-  return (*localMem)->getBuf();
-}
-
-ConvProjection::~ConvProjection() {
-  hl_destroy_tensor_descriptor(inputDesc_);
-  hl_destroy_tensor_descriptor(outputDesc_);
-  hl_destroy_filter_descriptor(filterDesc_);
-  hl_destroy_convolution_descriptor(convDesc_);
-}
-
 }  // namespace paddle
paddle/gserver/layers/ConvProjection.h

...
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once

-#include "Projection.h"
+#include "ConvBaseProjection.h"
 #include "paddle/math/MathUtils.h"

 namespace paddle {
...
@@ -22,109 +22,20 @@ namespace paddle {
 /**
  * @brief Convolution projection do the same calculation with CudnnConvLayer.
  */
-class ConvProjection : public Projection {
+class ConvProjection : public ConvBaseProjection {
 public:
   /**
    * Constructor.
    */
   ConvProjection(const ProjectionConfig &config,
                  ParameterPtr parameter,
-                 bool useGpu);
+                 bool useGpu)
+      : ConvBaseProjection(config, parameter, useGpu) {}

-  ~ConvProjection();
+  ~ConvProjection() {}

   virtual void forward();
   virtual void backward(const UpdateCallback &callback);
-
-protected:
-  void getConvParams();
-  void initCudnn();
-
-  void reshapeTensorDesc(int batchSize);
-  void reshape(int batchSize);
-
-  size_t calOutputSize() {
-    imageH_ = in_->getFrameHeight();
-    imageW_ = in_->getFrameWidth();
-    if (imageH_ == 0) imageH_ = configImgH_;
-    if (imageW_ == 0) imageW_ = configImgW_;
-    outputH_ = outputSize(imageH_, filterH_, paddingH_, strideH_,
-                          /* caffeMode */ true);
-    outputW_ = outputSize(imageW_, filterW_, paddingW_, strideW_,
-                          /* caffeMode */ true);
-
-    const_cast<Argument *>(out_)->setFrameHeight(outputH_);
-    const_cast<Argument *>(out_)->setFrameWidth(outputW_);
-
-    inputOffset_ = (channels_ / groups_) * imageH_ * imageW_;
-    outputOffset_ = (numFilters_ / groups_) * outputH_ * outputW_;
-    return outputH_ * outputW_ * numFilters_;
-  }
-
-  static void *getSpaceBytes(size_t size);
-
-  /// imageH_ and imageW_ is calculated from the input layer.
-  int imageH_, imageW_;
-  /// configImgH_ and configImgW_ is obtained from config.
-  int configImgH_, configImgW_;
-  int outputH_, outputW_;
-  int channels_, numFilters_;
-  int paddingH_, paddingW_;
-  int strideH_, strideW_;
-  int filterH_, filterW_;
-  /// One group offset of input data.
-  int inputOffset_;
-  /// One group offset of output data.
-  int outputOffset_;
-  /// One group offset of weight.
-  int weightOffset_;
-  int groups_;
-
-  /// Cudnn tensor descriptor for input.
-  hl_tensor_descriptor inputDesc_;
-  /// Cudnn tensor descriptor for output.
-  hl_tensor_descriptor outputDesc_;
-  /// Cudnn tensor descriptor for filter.
-  hl_filter_descriptor filterDesc_;
-  /// Cudnn tensor descriptor for a convolution operation.
-  hl_convolution_descriptor convDesc_;
-
-  /// Record the algorithm for forward convolution, which is obtained by cudnn
-  /// api to search the best suited algorithm.
-  int fwdAlgo_;
-  /// Record the algorithm for computing convolution gradient with respect to
-  /// filter coefficients.
-  int bwdFilterAlgo_;
-  /// Record the algorithm for computing convolution gradient with respect to
-  /// the output.
-  int bwdDataAlgo_;
-  /// Amount of GPU memory needed as workspace to be able to execute a
-  /// forward convolution with the specified algo.
-  size_t fwdLimitBytes_;
-  /// Amount of GPU memory needed as workspace to be able to execute a
-  /// backwardFilter with the specified algo.
-  size_t bwdDataLimitBytes_;
-  /// Amount of GPU memory needed as workspace to be able to execute a
-  /// backwardData with the specified algo.
-  size_t bwdFilterLimitBytes_;
-  /// Size of total work space.
-  size_t workSpaceInBytes_;
-
-  /// Whether to call cuDNN api to choose conv algorithm.
-  bool isSelectAlgo_;
-  /// batchNum is used to record batch size. If the batch size is changed,
-  /// the selection algorithm will be called.
-  int batchNum_;
-  bool bias_;
-
-  std::unique_ptr<Weight> weight_;
-  static ThreadLocalD<std::vector<MemoryHandle *>> convMem_;
 };

 }  // namespace paddle
paddle/gserver/layers/ConvTransProjection.cpp (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "ConvTransProjection.h"
#include "paddle/utils/Stat.h"
namespace paddle {

REGISTER_PROJECTION(convt, ConvTransProjection);

void ConvTransProjection::forward() {
  int batchSize = in_->value->getHeight();
  reshape(batchSize);

  void *workSpace = NULL;
  if (workSpaceInBytes_ > 0) {
    workSpace = getSpaceBytes(workSpaceInBytes_);
  }

  for (int g = 0; g < groups_; ++g) {
    REGISTER_TIMER_INFO("CudnnConvTransFwTimer", getName().c_str());

    real *inData = in_->value->getData() + g * inputOffset_;
    real *wgtData = weight_->getW()->getData() + g * weightOffset_;
    real *outData = out_->value->getData() + g * outputOffset_;
    hl_convolution_backward_data(imageDesc_,
                                 outData,
                                 outputDesc_,
                                 inData,
                                 filterDesc_,
                                 wgtData,
                                 convDesc_,
                                 workSpace,
                                 bwdDataLimitBytes_,
                                 bwdDataAlgo_);
  }
}

void ConvTransProjection::backward(const UpdateCallback &callback) {
  REGISTER_TIMER_INFO("CudnnConvTransBpTimer", getName().c_str());

  void *workSpace = NULL;
  if (workSpaceInBytes_ > 0) {
    workSpace = getSpaceBytes(workSpaceInBytes_);
  }

  for (int g = 0; g < groups_; ++g) {
    real *outGrad = out_->grad->getData() + g * outputOffset_;
    if (weight_->getWGrad()) {
      real *inData = in_->value->getData() + g * inputOffset_;
      real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_;
      hl_convolution_backward_filter(imageDesc_,
                                     outGrad,
                                     outputDesc_,
                                     inData,
                                     filterDesc_,
                                     weightGrad,
                                     convDesc_,
                                     workSpace,
                                     bwdFilterLimitBytes_,
                                     bwdFilterAlgo_);
    }

    MatrixPtr preGrad = in_->grad;
    if (NULL != preGrad) {
      real *inGrad = preGrad->getData() + g * inputOffset_;
      real *wgtData = weight_->getW()->getData() + g * weightOffset_;
      hl_convolution_forward(imageDesc_,
                             outGrad,
                             outputDesc_,
                             inGrad,
                             filterDesc_,
                             wgtData,
                             convDesc_,
                             workSpace,
                             fwdLimitBytes_,
                             fwdAlgo_);
    }
  }

  weight_->getParameterPtr()->incUpdate(callback);
}

}  // namespace paddle
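The forward pass above is the heart of the commit: a transposed convolution's forward computation is exactly the backward-data computation of an ordinary convolution (and its backward pass is, symmetrically, an ordinary forward convolution), so ConvTransProjection can drive the same three cuDNN kernels with the image and output descriptors swapped. A toy 1-D illustration of that identity, independent of Paddle and cuDNN (the helper names here are hypothetical):

#include <cstdio>
#include <vector>

// Plain valid 1-D convolution (caffeMode, padding 0):
// out[o] = sum_k in[o * stride + k] * w[k]
std::vector<float> conv1d(const std::vector<float> &in,
                          const std::vector<float> &w, int stride) {
  int outLen = (int(in.size()) - int(w.size())) / stride + 1;
  std::vector<float> out(outLen, 0.f);
  for (int o = 0; o < outLen; ++o)
    for (int k = 0; k < int(w.size()); ++k) out[o] += in[o * stride + k] * w[k];
  return out;
}

// Transposed 1-D convolution == conv1d's gradient w.r.t. its input: each
// input element is scattered through the filter. The output length
// (inLen - 1) * stride + filterLen matches the imageSize() relation used by
// ConvBaseProjection::calOutputSize with padding 0.
std::vector<float> convTrans1d(const std::vector<float> &in,
                               const std::vector<float> &w, int stride) {
  int outLen = (int(in.size()) - 1) * stride + int(w.size());
  std::vector<float> out(outLen, 0.f);
  for (int i = 0; i < int(in.size()); ++i)
    for (int k = 0; k < int(w.size()); ++k) out[i * stride + k] += in[i] * w[k];
  return out;
}

int main() {
  std::vector<float> x = {1, 2, 3, 4};
  std::vector<float> w = {1, 10};
  auto y = conv1d(x, w, /*stride=*/1);       // length 3
  auto z = convTrans1d(y, w, /*stride=*/1);  // back to length 4
  for (float v : z) std::printf("%g ", v);
  std::printf("\n");
  return 0;
}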
paddle/gserver/layers/ConvTransProjection.h (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "ConvBaseProjection.h"
#include "paddle/math/MathUtils.h"
namespace paddle {

/**
 * @brief Convolution transpose projection; does the same calculation as
 * CudnnConvTransLayer.
 */
class ConvTransProjection : public ConvBaseProjection {
public:
  /**
   * Constructor.
   */
  ConvTransProjection(const ProjectionConfig &config,
                      ParameterPtr parameter,
                      bool useGpu)
      : ConvBaseProjection(config, parameter, useGpu) {}

  ~ConvTransProjection() {}

  virtual void forward();
  virtual void backward(const UpdateCallback &callback);
};

}  // namespace paddle
paddle/gserver/layers/CudnnConvBaseLayer.cpp (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "CudnnConvBaseLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {

bool CudnnConvBaseLayer::init(const LayerMap &layerMap,
                              const ParameterMap &parameterMap) {
  if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
  CHECK(useGpu_) << "CudnnConvLayer only support gpu";

  CHECK_EQ(inputLayers_.size(), parameters_.size());
  projections_.reserve(inputLayers_.size());
  projConf_.reserve(inputLayers_.size());

  numFilters_ = config_.num_filters();
  CHECK(config_.shared_biases());
  for (size_t i = 0; i < inputLayers_.size(); i++) {
    ProjectionConfig *conf = new ProjectionConfig();
    if (isDeconv_) {
      conf->set_type("convt");
    } else {
      conf->set_type("conv");
    }
    conf->set_num_filters(numFilters_);
    ConvConfig *convConf = conf->mutable_conv_conf();
    *convConf = *(config_.mutable_inputs(i)->mutable_conv_conf());
    conf->set_input_size(getPrev(i)->getSize());
    conf->set_output_size(getSize());
    projConf_.emplace_back(conf);
    projections_.emplace_back(
        Projection::create(*projConf_[i], parameters_[i], useGpu_));
  }

  if (biases_.get() && sharedBiases_) {
    hl_create_tensor_descriptor(&biasDesc_);
    hl_create_tensor_descriptor(&outputDesc_);
    hl_tensor_reshape(biasDesc_, 1, numFilters_, 1, 1);
  }

  return true;
}

void CudnnConvBaseLayer::forward(PassType passType) {
  Layer::forward(passType);

  int batchSize = getInput(0).getBatchSize();
  resetOutput(batchSize, calOutputSize());

  for (size_t i = 0; i != inputLayers_.size(); ++i) {
    projections_[i]->forward(&getInput(i), &getOutput(), passType);
  }

  if (biases_) {
    REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str());
    int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
    int outH, outW;
    if (isDeconv_) {
      outH = imgSizeH_[0];
      outW = imgSizeW_[0];
    } else {
      outH = outputH_[0];
      outW = outputW_[0];
    }

    hl_tensor_reshape(outputDesc_,
                      batchSize,
                      numFilters_,
                      outH,
                      outW,
                      numFilters_ * outH * outW,
                      outH * outW,
                      outW,
                      1);
    real *outData = getOutputValue()->getData();
    real *biasData = biases_->getW()->getData();
    hl_convolution_forward_add_bias(biasDesc_, biasData, outputDesc_, outData);
  }

  forwardActivation();
}

void CudnnConvBaseLayer::backward(const UpdateCallback &callback) {
  backwardActivation();

  if (biases_ && biases_->getWGrad()) {
    REGISTER_TIMER_INFO("CudnnConvBpBiasTimer", getName().c_str());
    real *biasGrad = biases_->getWGrad()->getData();
    real *outGrad = getOutputGrad()->getData();
    hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad);

    biases_->getParameterPtr()->incUpdate(callback);
  }

  for (size_t i = 0; i != inputLayers_.size(); ++i) {
    projections_[i]->backward(callback);
  }
}

CudnnConvBaseLayer::~CudnnConvBaseLayer() {
  if (biases_) {
    hl_destroy_tensor_descriptor(biasDesc_);
    hl_destroy_tensor_descriptor(outputDesc_);
  }
}

}  // namespace paddle
paddle/gserver/layers/CudnnConvBaseLayer.h (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "ConvBaseLayer.h"
#include "Projection.h"
#include "paddle/math/Matrix.h"
namespace paddle {

/**
 * @brief A 2-dimension conv layer implemented by cuDNN. It only
 * supports GPU mode. CudnnConvLayer is selected automatically for GPU
 * mode and ExpandConvLayer for CPU mode when the type is set to "conv".
 * Users can also specify "exconv" or "cudnn_conv" to request a
 * particular implementation.
 *
 * The config file api is img_conv_layer.
 */
class CudnnConvBaseLayer : public ConvBaseLayer {
protected:
  std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
  std::vector<std::unique_ptr<Projection>> projections_;

  hl_tensor_descriptor biasDesc_;
  hl_tensor_descriptor outputDesc_;

public:
  explicit CudnnConvBaseLayer(const LayerConfig &config)
      : ConvBaseLayer(config) {}

  ~CudnnConvBaseLayer();
  void forward(PassType passType) override;
  void backward(const UpdateCallback &callback) override;

  bool init(const LayerMap &layerMap,
            const ParameterMap &parameterMap) override;
};

}  // namespace paddle
paddle/gserver/layers/CudnnConvLayer.cpp

...
@@ -20,97 +20,4 @@ namespace paddle {

 REGISTER_LAYER(cudnn_conv, CudnnConvLayer);

-bool CudnnConvLayer::init(const LayerMap &layerMap,
-                          const ParameterMap &parameterMap) {
-  if (!ConvBaseLayer::init(layerMap, parameterMap)) return false;
-  CHECK(useGpu_) << "CudnnConvLayer only support gpu";
-
-  CHECK_EQ(inputLayers_.size(), parameters_.size());
-  projections_.reserve(inputLayers_.size());
-  projConf_.reserve(inputLayers_.size());
-
-  numFilters_ = config_.num_filters();
-  CHECK(config_.shared_biases());
-  for (size_t i = 0; i < inputLayers_.size(); i++) {
-    ProjectionConfig *conf = new ProjectionConfig();
-    conf->set_type("conv");
-    conf->set_num_filters(numFilters_);
-    ConvConfig *convConf = conf->mutable_conv_conf();
-    *convConf = *(config_.mutable_inputs(i)->mutable_conv_conf());
-    conf->set_input_size(getPrev(i)->getSize());
-    conf->set_output_size(getSize());
-    projConf_.emplace_back(conf);
-    projections_.emplace_back(
-        Projection::create(*projConf_[i], parameters_[i], useGpu_));
-  }
-
-  if (biases_.get() && sharedBiases_) {
-    hl_create_tensor_descriptor(&biasDesc_);
-    hl_create_tensor_descriptor(&outputDesc_);
-    hl_tensor_reshape(biasDesc_, 1, numFilters_ / groups_[0], 1, 1);
-    biasOffset_ = numFilters_ / groups_[0];
-  }
-
-  return true;
-}
-
-void CudnnConvLayer::forward(PassType passType) {
-  Layer::forward(passType);
-
-  int batchSize = getInput(0).getBatchSize();
-  resetOutput(batchSize, calOutputSize());
-
-  for (size_t i = 0; i != inputLayers_.size(); ++i) {
-    projections_[i]->forward(&getInput(i), &getOutput(), passType);
-  }
-
-  if (biases_) {
-    REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str());
-    int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
-    hl_tensor_reshape(outputDesc_, batchSize, numFilters_ / groups_[0],
-                      outputH_[0], outputW_[0],
-                      numFilters_ * outputH_[0] * outputW_[0],
-                      outputH_[0] * outputW_[0], outputW_[0], 1);
-    outputOffset_ = getOutputValue()->getWidth() / groups_[0];
-    for (int g = 0; g < groups_[0]; ++g) {
-      real *biasData = biases_->getW()->getData() + biasOffset_ * g;
-      real *outData = getOutputValue()->getData() + outputOffset_ * g;
-      hl_convolution_forward_add_bias(biasDesc_, biasData,
-                                      outputDesc_, outData);
-    }
-  }
-
-  forwardActivation();
-}
-
-void CudnnConvLayer::backward(const UpdateCallback &callback) {
-  backwardActivation();
-
-  if (biases_ && biases_->getWGrad()) {
-    REGISTER_TIMER_INFO("CudnnConvBpBiasTimer", getName().c_str());
-    for (int g = 0; g < groups_[0]; ++g) {
-      real *biasGrad = biases_->getWGrad()->getData() + biasOffset_ * g;
-      real *outGrad = getOutputGrad()->getData() + outputOffset_ * g;
-      hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad);
-    }
-    biases_->getParameterPtr()->incUpdate(callback);
-  }
-
-  for (size_t i = 0; i != inputLayers_.size(); ++i) {
-    projections_[i]->backward(callback);
-  }
-}
-
-CudnnConvLayer::~CudnnConvLayer() {
-  if (biases_) {
-    hl_destroy_tensor_descriptor(biasDesc_);
-    hl_destroy_tensor_descriptor(outputDesc_);
-  }
-}
-
 }  // namespace paddle
paddle/gserver/layers/CudnnConvLayer.h

...
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once

 #include <vector>
-#include "ConvBaseLayer.h"
+#include "CudnnConvBaseLayer.h"
 #include "Projection.h"
 #include "paddle/math/Matrix.h"
...
@@ -30,27 +30,12 @@ namespace paddle {
  *
  * The config file api is img_conv_layer.
  */
-class CudnnConvLayer : public ConvBaseLayer {
-protected:
-  std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
-  std::vector<std::unique_ptr<Projection>> projections_;
-  hl_tensor_descriptor biasDesc_;
-  hl_tensor_descriptor outputDesc_;
-  int biasOffset_;
-  int outputOffset_;
-
+class CudnnConvLayer : public CudnnConvBaseLayer {
 public:
-  explicit CudnnConvLayer(const LayerConfig &config) : ConvBaseLayer(config) {}
-
-  ~CudnnConvLayer();
-
-  bool init(const LayerMap &layerMap,
-            const ParameterMap &parameterMap) override;
-  void forward(PassType passType) override;
-  void backward(const UpdateCallback &callback) override;
-  void addBiases();
-  void bpropBiases();
+  explicit CudnnConvLayer(const LayerConfig &config)
+      : CudnnConvBaseLayer(config) {}
+
+  ~CudnnConvLayer() {}
 };

 }  // namespace paddle
paddle/gserver/layers/CudnnConvTransLayer.cpp (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "CudnnConvTransLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {

REGISTER_LAYER(cudnn_convt, CudnnConvTransLayer);

}  // namespace paddle
paddle/gserver/layers/CudnnConvTransLayer.h (new file, mode 100644)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "CudnnConvBaseLayer.h"
#include "Projection.h"
#include "paddle/math/Matrix.h"
namespace paddle {

/**
 * @brief A 2-dimension conv-transpose layer implemented by cuDNN. It only
 * supports GPU mode. Users can specify "exconvt" or "cudnn_convt" to
 * request a particular implementation.
 *
 * The config file api is img_conv_layer.
 */
class CudnnConvTransLayer : public CudnnConvBaseLayer {
public:
  explicit CudnnConvTransLayer(const LayerConfig &config)
      : CudnnConvBaseLayer(config) {}

  ~CudnnConvTransLayer() {}
};

}  // namespace paddle
paddle/gserver/tests/test_ConvUnify.cpp

...
@@ -34,8 +34,7 @@ DECLARE_double(checkgrad_eps);
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_bool(prev_batch_state);

-// Do one forward pass of convTrans layer and check to see if its output
-// matches the given result
+// Do one forward pass of ConvLayer using either exconv or cudnn_conv
 MatrixPtr doOneConvTest(size_t imgSize,
                         size_t output_x,
                         size_t stride,
...
paddle/gserver/tests/test_LayerGrad.cpp

...
@@ -166,15 +166,19 @@ TEST(Projection, scaling) {
   }
 }

-void testProjectionConv(size_t groups) {
+void testProjectionConv(size_t groups, bool isDeconv) {
   const int NUM_FILTERS = 18;
   const int FILTER_SIZE = 2;
-  const int FILTER_SIZE_Y = 3;
+  const int FILTER_SIZE_Y = 4;
   const int CHANNELS = 3;
   const int IMAGE_SIZE = 16;

   ProjectionConfig conf;
-  conf.set_type("conv");
+  if (isDeconv) {
+    conf.set_type("convt");
+  } else {
+    conf.set_type("conv");
+  }
   conf.set_num_filters(NUM_FILTERS);
   ConvConfig *conv = conf.mutable_conv_conf();
...
@@ -186,7 +190,11 @@ void testProjectionConv(size_t groups) {
   conv->set_stride(2);
   conv->set_stride_y(2);
   conv->set_groups(groups);
-  conv->set_filter_channels(conv->channels() / conv->groups());
+  if (isDeconv) {
+    conv->set_filter_channels(NUM_FILTERS / conv->groups());
+  } else {
+    conv->set_filter_channels(conv->channels() / conv->groups());
+  }
   conv->set_img_size(IMAGE_SIZE);
   int output_x = outputSize(conv->img_size(),
                             conv->filter_size(),
...
@@ -199,8 +207,14 @@ void testProjectionConv(size_t groups) {
                             conv->stride_y(),
                             /* caffeMode */ true);
   conv->set_output_x(output_x);
-  conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS);
-  conf.set_output_size(output_x * output_y * NUM_FILTERS);
+  conv->set_output_y(output_y);
+  if (isDeconv) {
+    conf.set_input_size(output_x * output_y * CHANNELS);
+    conf.set_output_size(IMAGE_SIZE * IMAGE_SIZE * NUM_FILTERS);
+  } else {
+    conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS);
+    conf.set_output_size(output_x * output_y * NUM_FILTERS);
+  }
   testProjectionGrad(conf,
                      INPUT_DATA,
...
@@ -215,8 +229,12 @@ void testProjectionConv(size_t groups) {
 #ifndef PADDLE_ONLY_CPU
 TEST(Projection, conv) {
-  testProjectionConv(1);
-  testProjectionConv(3);
+  /// test ConvProjection
+  testProjectionConv(1, false);
+  testProjectionConv(3, false);
+  /// test ConvTransProjection
+  testProjectionConv(1, true);
+  testProjectionConv(3, true);
 }
 #endif
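A quick sanity check of the size bookkeeping this test relies on (the padding values are set in the elided lines, so take padding 0 purely for illustration): with IMAGE_SIZE = 16, FILTER_SIZE = 2, and stride 2,

\[
\text{output\_x} = \left\lfloor\frac{16 + 2\cdot 0 - 2}{2}\right\rfloor + 1 = 8,
\qquad
(8-1)\cdot 2 + 2 - 2\cdot 0 = 16,
\]

so the convt case can feed an 8×8 input and expect the 16×16 image back, which is exactly why the two branches above simply swap input_size and output_size.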
...
@@ -385,11 +403,11 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) {
   config.layerConfig.set_partial_sum(1);
   config.layerConfig.set_shared_biases(true);

-  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288});
+  config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
   LayerInputConfig *input = config.layerConfig.add_inputs();
   ConvConfig *conv = input->mutable_conv_conf();
   conv->set_filter_size(2);
-  conv->set_filter_size_y(3);
+  conv->set_filter_size_y(4);
   conv->set_channels(16);
   conv->set_padding(0);
   conv->set_padding_y(1);
...
@@ -416,6 +434,9 @@ TEST(Layer, convTransLayer) {
   for (auto useGpu : {false, true}) {
     testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu);
   }
+#ifndef PADDLE_ONLY_CPU
+  testConvTransLayer("cudnn_convt", /* trans= */ false, /* useGpu= */ true);
+#endif
 }

 TEST(Layer, blockExpandLayer) {
...