BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit fe6af6b6
Authored Nov 30, 2017 by guosheng

Enhance the AvgPooling to support optional exclude-mode

Parent: 1b6dcc2f
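The new mode only matters for pooling windows that overlap the padded border: with exclude mode the window sum is divided by the number of cells that actually lie inside the image, (hend - hstart) * (wend - wstart), while the include-padding mode divides by the full window area sizeY * sizeX. A minimal NumPy sketch of this semantics (illustrative only, not code from this commit; the function and argument names are made up):

import numpy as np

def avg_pool_2d(img, size, stride, pad, exclude_mode=True):
    """Reference 2-D average pooling on a single-channel image."""
    H, W = img.shape
    out_h = (H + 2 * pad - size) // stride + 1
    out_w = (W + 2 * pad - size) // stride + 1
    out = np.zeros((out_h, out_w))
    for ph in range(out_h):
        for pw in range(out_w):
            hs, ws = ph * stride - pad, pw * stride - pad
            he, we = min(hs + size, H), min(ws + size, W)
            hs, ws = max(hs, 0), max(ws, 0)
            window_sum = img[hs:he, ws:we].sum()
            # exclude_mode: divide by the cells inside the image;
            # otherwise divide by the full (padded) window area.
            pool_size = (he - hs) * (we - ws) if exclude_mode else size * size
            out[ph, pw] = window_sum / pool_size
    return out

For example, avg_pool_2d(np.ones((4, 4)), size=3, stride=2, pad=1) is 1.0 everywhere, while passing exclude_mode=False gives 4/9 for the top-left window, which covers only 4 real pixels out of 9.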
Showing 13 changed files with 107 additions and 37 deletions (+107 -37):
paddle/cuda/include/stub/hl_cnn_stub.h              +4   -2
paddle/cuda/src/hl_cuda_cnn.cu                      +18  -10
paddle/gserver/layers/PoolLayer.cpp                 +2   -0
paddle/gserver/layers/PoolLayer.h                   +2   -0
paddle/gserver/layers/PoolProjection.cpp            +6   -2
paddle/gserver/layers/PoolProjection.h              +1   -0
paddle/gserver/tests/test_LayerGrad.cpp             +15  -1
paddle/math/Matrix.cpp                              +16  -8
paddle/math/Matrix.h                                +13  -6
proto/ModelConfig.proto                             +2   -0
python/paddle/trainer/config_parser.py              +6   -3
python/paddle/trainer_config_helpers/layers.py      +10  -4
python/paddle/trainer_config_helpers/poolings.py    +12  -1
paddle/cuda/include/stub/hl_cnn_stub.h

@@ -68,7 +68,8 @@ inline void hl_avgpool_forward(const int frameCnt,
                                const int paddingH,
                                const int paddingW,
                                real* tgtData,
-                               const int tgtStride) {}
+                               const int tgtStride,
+                               const bool excludeMode) {}

 inline void hl_avgpool_backward(const int frameCnt,
                                 const real* outGrad,
@@ -86,7 +87,8 @@ inline void hl_avgpool_backward(const int frameCnt,
                                 real scaleA,
                                 real scaleB,
                                 real* backGrad,
-                                const int outStride) {}
+                                const int outStride,
+                                const bool excludeMode) {}

 inline void hl_maxpool3D_forward(const int frameCnt,
                                  const real* inputData,
paddle/cuda/src/hl_cuda_cnn.cu

@@ -210,7 +210,8 @@ __global__ void KeAvgPoolForward(const int nthreads,
                                  const int padH,
                                  const int padW,
                                  real* tgtData,
-                                 const int tgtStride) {
+                                 const int tgtStride,
+                                 const bool excludeMode) {
   int index = blockIdx.x * blockDim.x + threadIdx.x;
   if (index < nthreads) {
     int pw = index % pooledW;
@@ -224,7 +225,8 @@ __global__ void KeAvgPoolForward(const int nthreads,
     int wend = min(wstart + sizeX, width);
     hstart = max(hstart, 0);
     wstart = max(wstart, 0);
-    int pool_size = (hend - hstart) * (wend - wstart);
+    int poolSize =
+        excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
     real aveval = 0;
     inputData += (frameNum * channels + c) * height * width;
@@ -235,7 +237,7 @@ __global__ void KeAvgPoolForward(const int nthreads,
       }
     }
     int tgtIndex =
        index % (pooledW * pooledH * channels) + frameNum * tgtStride;
-    tgtData[tgtIndex] = aveval / pool_size;
+    tgtData[tgtIndex] = aveval / poolSize;
   }
 }
@@ -253,7 +255,8 @@ void hl_avgpool_forward(const int frameCnt,
                         const int paddingH,
                         const int paddingW,
                         real* tgtData,
-                        const int tgtStride) {
+                        const int tgtStride,
+                        const bool excludeMode) {
   int num_kernels = pooledH * pooledW * channels * frameCnt;
   int blocks = (num_kernels + 1024 - 1) / 1024;
   KeAvgPoolForward<<<blocks, 1024, 0, STREAM_DEFAULT>>>(num_kernels,
@@ -270,7 +273,8 @@ void hl_avgpool_forward(const int frameCnt,
                                                         paddingH,
                                                         paddingW,
                                                         tgtData,
-                                                        tgtStride);
+                                                        tgtStride,
+                                                        excludeMode);
   CHECK_SYNC("hl_avgpool_forward failed");
 }
@@ -290,7 +294,8 @@ __global__ void KeAvgPoolBackward(const int nthreads,
                                   real scaleA,
                                   real scaleB,
                                   real* tgtGrad,
-                                  const int outStride) {
+                                  const int outStride,
+                                  const bool excludeMode) {
   int index = blockIdx.x * blockDim.x + threadIdx.x;
   if (index < nthreads) {
     int offsetW = index % width + padW;
@@ -314,8 +319,9 @@ __global__ void KeAvgPoolBackward(const int nthreads,
       int wstart = pw * strideW - padW;
       int wend = min(wstart + sizeX, width);
       wstart = max(wstart, 0);
-      int poolsize = (hend - hstart) * (wend - wstart);
-      gradient += outGrad[ph * pooledW + pw] / poolsize;
+      int poolSize =
+          excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
+      gradient += outGrad[ph * pooledW + pw] / poolSize;
       }
     }
     tgtGrad[index] = scaleB * tgtGrad[index] + scaleA * gradient;
@@ -338,7 +344,8 @@ void hl_avgpool_backward(const int frameCnt,
                          real scaleA,
                          real scaleB,
                          real* backGrad,
-                         const int outStride) {
+                         const int outStride,
+                         const bool excludeMode) {
   int num_kernels = height * width * channels * frameCnt;
   int blocks = (num_kernels + 1024 - 1) / 1024;
@@ -358,7 +365,8 @@ void hl_avgpool_backward(const int frameCnt,
                                                          scaleA,
                                                          scaleB,
                                                          backGrad,
-                                                         outStride);
+                                                         outStride,
+                                                         excludeMode);
   CHECK_SYNC("hl_avgpool_backward failed");
 }
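The only functional change in these kernels is the divisor. For a 3x3 window (sizeY = sizeX = 3) whose first row and column fall on the padding, hend - hstart and wend - wstart are both 2 after clipping, so excludeMode divides the window sum (and, in the backward kernel, the propagated gradient) by 4, while the include-padding branch divides by sizeY * sizeX = 9. For interior windows both expressions equal 9, so the two modes agree there.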
paddle/gserver/layers/PoolLayer.cpp

@@ -45,6 +45,8 @@ bool PoolLayer::init(const LayerMap& layerMap,
   strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride();
   confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding();
   outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x();
+
+  excludeMode_ = conf.has_exclude_mode() ? conf.exclude_mode() : true;
   return true;
 }
paddle/gserver/layers/PoolLayer.h

@@ -38,6 +38,8 @@ protected:
   std::string poolType_;

+  bool excludeMode_;
+
 public:
   explicit PoolLayer(const LayerConfig& config) : Layer(config) {}
paddle/gserver/layers/PoolProjection.cpp

@@ -36,6 +36,8 @@ PoolProjection::PoolProjection(const ProjectionConfig& config,
   strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride();
   confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding();
   outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x();
+
+  excludeMode_ = conf.has_exclude_mode() ? conf.exclude_mode() : true;
 }

 size_t PoolProjection::getSize() {
@@ -141,7 +143,8 @@ void AvgPoolProjection::forward() {
                          outputY_,
                          outputX_,
                          confPaddingY_,
-                         confPadding_);
+                         confPadding_,
+                         excludeMode_);
 }

 void AvgPoolProjection::backward(const UpdateCallback& callback) {
@@ -166,6 +169,7 @@ void AvgPoolProjection::backward(const UpdateCallback& callback) {
                           1,
                           1,
                           confPaddingY_,
-                          confPadding_);
+                          confPadding_,
+                          excludeMode_);
 }
 }  // namespace paddle
paddle/gserver/layers/PoolProjection.h

@@ -28,6 +28,7 @@ protected:
   int confPaddingY_, confPadding_;
   size_t channels_;
   std::string poolType_;
+  bool excludeMode_;

 public:
   PoolProjection(const ProjectionConfig& config,
paddle/gserver/tests/test_LayerGrad.cpp

@@ -1211,7 +1211,10 @@ void setPoolConfig(TestConfig* config,
   pool->set_output_y(oh);
 }

-void testPoolLayer(const string& poolType, bool trans, bool useGpu) {
+void testPoolLayer(const string& poolType,
+                   bool trans,
+                   bool useGpu,
+                   bool excludeMode = true) {
   TestConfig config;
   config.inputDefs.push_back({INPUT_DATA, "layer_0", 3136, 0});
   LayerInputConfig* input = config.layerConfig.add_inputs();
@@ -1219,6 +1222,7 @@ void testPoolLayer(const string& poolType, bool trans, bool useGpu) {
   pool->set_img_size(14);
   pool->set_img_size_y(14);
+  pool->set_exclude_mode(excludeMode);
   setPoolConfig(&config, pool, poolType);
   config.layerConfig.set_size(pool->output_x() * pool->output_y() *
                               pool->channels());
@@ -1250,16 +1254,26 @@ void testPoolLayer2(const string& poolType, bool trans, bool useGpu) {
 TEST(Layer, PoolLayer) {
   testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ false);
+  testPoolLayer("avg-projection",
+                /* trans= */ false,
+                /* useGpu= */ false,
+                /* excludeMode= */ false);
   testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ false);
   testPoolLayer("max-pool-with-mask", /* trans= */ false, /* useGpu= */ false);

 #ifdef PADDLE_WITH_CUDA
   testPoolLayer("avg-projection", /* trans= */ false, /* useGpu= */ true);
+  testPoolLayer("avg-projection",
+                /* trans= */ false,
+                /* useGpu= */ true,
+                /* excludeMode= */ false);
   testPoolLayer("max-projection", /* trans= */ false, /* useGpu= */ true);
   testPoolLayer("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
   testPoolLayer("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
   testPoolLayer2("cudnn-max-pool", /* trans= */ false, /* useGpu= */ true);
   testPoolLayer2("cudnn-avg-pool", /* trans= */ false, /* useGpu= */ true);
+  testPoolLayer2(
+      "cudnn-avg-incl-pad-pool", /* trans= */ false, /* useGpu= */ true);
   testPoolLayer("max-pool-with-mask", /* trans= */ false, /* useGpu= */ true);
 #endif
 }
paddle/math/Matrix.cpp

@@ -1130,7 +1130,8 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat,
                                size_t outputH,
                                size_t outputW,
                                size_t paddingH,
-                               size_t paddingW) {
+                               size_t paddingW,
+                               bool excludeMode) {
   CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal";
   real* inputData = inputMat.getData();
@@ -1153,7 +1154,8 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat,
                      paddingH,
                      paddingW,
                      data_,
-                     getStride());
+                     getStride(),
+                     excludeMode);
 }

 void GpuMatrix::avgPoolBackward(Matrix& outGrad,
@@ -1168,7 +1170,8 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad,
                                 real scaleTargets,
                                 real scaleOutput,
                                 size_t paddingH,
-                                size_t paddingW) {
+                                size_t paddingW,
+                                bool excludeMode) {
   CHECK(outGrad.useGpu_ == true) << "Matrix type are not equal";
   real* outDiff = outGrad.getData();
@@ -1194,7 +1197,8 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad,
                       scaleTargets,
                       scaleOutput,
                       data_,
-                      outGrad.getStride());
+                      outGrad.getStride(),
+                      excludeMode);
 }

 void GpuMatrix::maxPool3DForward(Matrix& inputMat,
@@ -2136,7 +2140,8 @@ void CpuMatrix::avgPoolForward(Matrix& input,
                                size_t outputH,
                                size_t outputW,
                                size_t paddingH,
-                               size_t paddingW) {
+                               size_t paddingW,
+                               bool excludeMode) {
   // The main loop
   size_t num = input.getHeight();
   size_t inLength = imgSizeH * imgSizeW;
@@ -2165,7 +2170,8 @@ void CpuMatrix::avgPoolForward(Matrix& input,
             tgtData[ph * outputW + pw] += inData[h * imgSizeW + w];
           }
         }
-        int poolSize = (hend - hstart) * (wend - wstart);
+        int poolSize =
+            excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
         CHECK(poolSize);
         tgtData[ph * outputW + pw] /= poolSize;
       }
@@ -2189,7 +2195,8 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
                                 real scaleTargets,
                                 real scaleOutput,
                                 size_t paddingH,
-                                size_t paddingW) {
+                                size_t paddingW,
+                                bool excludeMode) {
   size_t num = input.getHeight();
   size_t channels = input.getWidth() / outputH / outputW;
   size_t inLength = imgSizeH * imgSizeW;
@@ -2211,7 +2218,8 @@ void CpuMatrix::avgPoolBackward(Matrix& input,
         int wstart = pw * strideW - paddingW;
         int wend = std::min(wstart + sizeX, imgSizeW);
         wstart = std::max(wstart, 0);
-        int poolSize = (hend - hstart) * (wend - wstart);
+        int poolSize =
+            excludeMode ? (hend - hstart) * (wend - wstart) : sizeY * sizeX;
         CHECK(poolSize);
         for (int h = hstart; h < hend; ++h) {
paddle/math/Matrix.h

@@ -911,7 +911,8 @@ public:
                               size_t outputH,
                               size_t outputW,
                               size_t paddingH,
-                              size_t paddingW) {
+                              size_t paddingW,
+                              bool excludeMode = true) {
     LOG(FATAL) << "Not implemeted";
   }
@@ -927,9 +928,11 @@ public:
                                real scaleTargets,
                                real scaleOutput,
                                size_t paddingH,
-                               size_t paddingW) {
+                               size_t paddingW,
+                               bool excludeMode = true) {
     LOG(FATAL) << "Not implemeted";
   }

   /**
    * Pooling 3D forward operation, pick out the largest element
    * in the sizeX of value
@@ -1458,7 +1461,8 @@ public:
                       size_t outputH,
                       size_t outputW,
                       size_t paddingH,
-                      size_t paddingW);
+                      size_t paddingW,
+                      bool excludeMode = true);

   void avgPoolBackward(Matrix& input,
                        size_t imgSizeH,
@@ -1472,7 +1476,8 @@ public:
                        real scaleTargets,
                        real scaleOutput,
                        size_t paddingH,
-                       size_t paddingW);
+                       size_t paddingW,
+                       bool excludeMode = true);

   void maxPool3DForward(Matrix& inputMat,
                         Matrix& maxPoolIdx,
@@ -1730,7 +1735,8 @@ public:
                       size_t outputH,
                       size_t outputW,
                       size_t paddingH,
-                      size_t paddingW);
+                      size_t paddingW,
+                      bool excludeMode = true);

   void avgPoolBackward(Matrix& input,
                        size_t imgSizeH,
@@ -1744,7 +1750,8 @@ public:
                        real scaleTargets,
                        real scaleOutput,
                        size_t paddingH,
-                       size_t paddingW);
+                       size_t paddingW,
+                       bool excludeMode = true);

   void maxPool3DForward(Matrix& inputMat,
                         Matrix& maxPoolIdx,
proto/ModelConfig.proto

@@ -139,6 +139,8 @@ message PoolConfig {
   optional uint32 output_z = 16 [default = 1];
   optional uint32 img_size_z = 17 [default = 1];
   optional uint32 padding_z = 18 [default = 1];
+
+  optional bool exclude_mode = 19 [default = true];
 }

 message SppConfig {
python/paddle/trainer/config_parser.py

@@ -1233,7 +1233,7 @@ def parse_bilinear(bilinear, input_layer_name, bilinear_conf):
     bilinear_conf.out_size_y = bilinear.out_size_y


-def parse_pool(pool, input_layer_name, pool_conf, ceil_mode):
+def parse_pool(pool, input_layer_name, pool_conf, ceil_mode, exclude_mode):
     pool_conf.pool_type = pool.pool_type
     config_assert(pool.pool_type in [
         'max-projection', 'avg-projection', 'max-pool-with-mask', 'cudnn-max-pool', 'cudnn-avg-pool'
@@ -1263,6 +1263,8 @@ def parse_pool(pool, input_layer_name, pool_conf, ceil_mode):
                                pool_conf.padding_y,
                                pool_conf.stride_y, not ceil_mode)
+    pool_conf.exclude_mode = exclude_mode
+

 def parse_pool3d(pool, input_layer_name, pool_conf, ceil_mode):
     pool_conf.pool_type = pool.pool_type
@@ -2303,7 +2305,8 @@ class NormLayer(LayerBase):
 class PoolLayer(LayerBase):
     layer_type = 'pool'

-    def __init__(self, name, inputs, ceil_mode=True, **xargs):
+    def __init__(self, name, inputs, ceil_mode=True, exclude_mode=True,
+                 **xargs):
         use_mkldnn = int(g_command_config_args.get("use_mkldnn", 0))
         if self.layer_type == "mkldnn_pool":
             config_assert(use_mkldnn, "mkldnn_pool only support MKLDNN")
@@ -2314,7 +2317,7 @@ class PoolLayer(LayerBase):
             input_layer = self.get_input_layer(input_index)
             pool_conf = self.config.inputs[input_index].pool_conf
             parse_pool(self.inputs[input_index].pool, input_layer.name,
-                       pool_conf, ceil_mode)
+                       pool_conf, ceil_mode, exclude_mode)
             self.set_cnn_layer(name, pool_conf.output_y, pool_conf.output_x,
                                pool_conf.channels)
python/paddle/trainer_config_helpers/layers.py

@@ -21,7 +21,7 @@ from .activations import LinearActivation, SigmoidActivation, TanhActivation, \
     ReluActivation, IdentityActivation, SoftmaxActivation, BaseActivation
 from .evaluators import *
 from .poolings import MaxPooling, AvgPooling, MaxWithMaskPooling, BasePoolingType, \
-    CudnnAvgPooling, CudnnMaxPooling
+    CudnnAvgPooling, CudnnAvgInclPadPooling, CudnnMaxPooling
 from .attrs import *
 from .default_decorators import *
@@ -2709,7 +2709,8 @@ def img_pool_layer(input,
                    pool_size_y=None,
                    stride_y=None,
                    padding_y=None,
-                   ceil_mode=True):
+                   ceil_mode=True,
+                   exclude_mode=True):
     """
     Image pooling Layer.
@@ -2773,10 +2774,14 @@ def img_pool_layer(input,
     :param layer_attr: The extra layer attribute. See ExtraLayerAttribute for
                        details.
     :type layer_attr: ExtraLayerAttribute
-    :param ceil_mode: Wether to use the ceil function to calculate output height and width.
+    :param ceil_mode: Whether to use the ceil function to calculate output height and width.
                       True is the default. If it is set to False, the floor function will
                       be used.
     :type ceil_mode: bool
+    :param exclude_mode: Whether to exclude the padding cells when calculating, but only
+                         work when pool_type is AvgPooling. If use cudnn, use CudnnAvgPooling
+                         or CudnnAvgInclPadPooling as pool_type to identify.
+    :type exclude_mode: bool
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
@@ -2790,7 +2795,7 @@ def img_pool_layer(input,
         pool_type.name = 'avg'

     assert type(pool_type) in [AvgPooling, MaxPooling, MaxWithMaskPooling, CudnnAvgPooling,
-                               CudnnMaxPooling], \
+                               CudnnMaxPooling, CudnnAvgInclPadPooling], \
         "only (Cudnn)AvgPooling, (Cudnn)MaxPooling, MaxWithMaskPooling are supported"

     type_name = pool_type.name + '-projection' \
@@ -2819,6 +2824,7 @@ def img_pool_layer(input,
                 padding_y=padding_y))
         ],
         ceil_mode=ceil_mode,
+        exclude_mode=exclude_mode,
         **ExtraLayerAttribute.to_kwargs(layer_attr))
     return LayerOutput(
         name,
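Putting the Python-side change together, the new keyword only affects average pooling. A hedged usage sketch based on the updated img_pool_layer signature (the data layer name and shapes are invented for illustration; this snippet is not part of the commit):

from paddle.trainer_config_helpers import data_layer, img_pool_layer, AvgPooling

img = data_layer(name="image", size=3 * 32 * 32)

# Default behaviour: padded cells are excluded from the divisor (exclude_mode=True).
pool_excl = img_pool_layer(input=img,
                           num_channels=3,
                           pool_size=3,
                           stride=2,
                           padding=1,
                           pool_type=AvgPooling())

# New option: count the padded cells in the divisor as well.
pool_incl = img_pool_layer(input=img,
                           num_channels=3,
                           pool_size=3,
                           stride=2,
                           padding=1,
                           pool_type=AvgPooling(),
                           exclude_mode=False)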
python/paddle/trainer_config_helpers/poolings.py

@@ -16,7 +16,8 @@
 __all__ = [
     "BasePoolingType", "MaxPooling", "AvgPooling", "MaxWithMaskPooling",
-    "CudnnMaxPooling", "CudnnAvgPooling", "SumPooling", "SquareRootNPooling"
+    "CudnnMaxPooling", "CudnnAvgPooling", "CudnnAvgInclPadPooling",
+    "SumPooling", "SquareRootNPooling"
 ]
@@ -88,6 +89,16 @@ class CudnnAvgPooling(BasePoolingType):
         BasePoolingType.__init__(self, "cudnn-avg-pool")


+class CudnnAvgInclPadPooling(BasePoolingType):
+    """
+    Cudnn average pooling only support GPU. Return the average value in the
+    pooling window taking into account the padding cells.
+    """
+
+    def __init__(self):
+        BasePoolingType.__init__(self, "cudnn-avg-incl-pad-pool")
+
+
 class AvgPooling(BasePoolingType):
     """
     Average pooling.
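As the new docstring notes, the CPU and CUDA kernels honour exclude_mode, whereas the cudnn path selects the behaviour through the pooling type itself. A hedged sketch of picking the include-padding cudnn variant (GPU only; the data layer name and shapes are illustrative, not from the commit):

from paddle.trainer_config_helpers import (data_layer, img_pool_layer,
                                           CudnnAvgInclPadPooling)

img = data_layer(name="image", size=3 * 32 * 32)
pool = img_pool_layer(input=img,
                      num_channels=3,
                      pool_size=3,
                      stride=2,
                      padding=1,
                      pool_type=CudnnAvgInclPadPooling())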