BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit fb20187a, authored Nov 03, 2016 by wangyang59
deconv layer implementation modification following luotao1 comments
Parent: 3d72e949
Showing 7 changed files with 115 additions and 126 deletions.
paddle/gserver/layers/ConvBaseLayer.cpp          +32  -33
paddle/gserver/layers/ConvBaseLayer.h            +0   -2
paddle/gserver/layers/ExpandConvBaseLayer.cpp    +8   -8
paddle/gserver/tests/test_ConvTrans.cpp          +30  -33
paddle/gserver/tests/test_LayerGrad.cpp          +2   -4
python/paddle/trainer/config_parser.py           +42  -45
python/paddle/trainer_config_helpers/layers.py   +1   -1
paddle/gserver/layers/ConvBaseLayer.cpp

```diff
@@ -89,42 +89,41 @@ size_t ConvBaseLayer::calOutputSize() {
   clearAndReserve(&outputW_);
   size_t layerSize = 0;
-  if (!isDeconv_) {
-    for (size_t i = 0; i < inputLayers_.size(); i++) {
-      imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
-      imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
-      if (imgSizeH_[i] == 0)
-        imgSizeH_[i] = config_.inputs(i).conv_conf().img_size();
-      if (imgSizeW_[i] == 0)
-        imgSizeW_[i] = config_.inputs(i).conv_conf().img_size();
-      outputH_.push_back(
-          outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i]));
-      outputW_.push_back(
-          outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i]));
-      CHECK_EQ(outputH_[i], outputH_[0]);
-      CHECK_EQ(outputW_[i], outputW_[0]);
-    }
-    getOutput().setFrameHeight(outputH_[0]);
-    getOutput().setFrameWidth(outputW_[0]);
-    layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_);
-  } else {
+  auto setLayerSize = [&](IntV& inH, IntV& inW, IntV& outH, IntV& outW) {
     for (size_t i = 0; i < inputLayers_.size(); i++) {
-      outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight());
-      outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth());
-      if (outputH_[i] == 0)
-        outputH_[i] = config_.inputs(i).conv_conf().output_x();
-      if (outputW_[i] == 0)
-        outputW_[i] = config_.inputs(i).conv_conf().output_x();
-      imgSizeH_.push_back(
-          imageSize(outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i]));
-      imgSizeW_.push_back(
-          imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i]));
-      CHECK_EQ(imgSizeH_[i], imgSizeH_[0]);
-      CHECK_EQ(imgSizeW_[i], imgSizeW_[0]);
+      inH.push_back(inputLayers_[i]->getOutput().getFrameHeight());
+      inW.push_back(inputLayers_[i]->getOutput().getFrameWidth());
+      if (isDeconv_) {
+        if (inH[i] == 0) inH[i] = config_.inputs(i).conv_conf().output_x();
+        if (inW[i] == 0) inW[i] = config_.inputs(i).conv_conf().output_x();
+        outH.push_back(
+            imageSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i]));
+        outW.push_back(
+            imageSize(inW[i], filterSize_[i], padding_[i], stride_[i]));
+      } else {
+        if (inH[i] == 0) inH[i] = config_.inputs(i).conv_conf().img_size();
+        if (inW[i] == 0) inW[i] = config_.inputs(i).conv_conf().img_size();
+        outH.push_back(
+            outputSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i]));
+        outW.push_back(
+            outputSize(inW[i], filterSize_[i], padding_[i], stride_[i]));
+      }
+      CHECK_EQ(outH[i], outH[0]);
+      CHECK_EQ(outW[i], outW[0]);
     }
-    getOutput().setFrameHeight(imgSizeH_[0]);
-    getOutput().setFrameWidth(imgSizeW_[0]);
-    layerSize = imgSizeH_[0] * imgSizeW_[0] * size_t(numFilters_);
+    getOutput().setFrameHeight(outH[0]);
+    getOutput().setFrameWidth(outW[0]);
+    layerSize = outH[0] * outW[0] * size_t(numFilters_);
+  };
+
+  if (isDeconv_) {
+    setLayerSize(outputH_, outputW_, imgSizeH_, imgSizeW_);
+  } else {
+    setLayerSize(imgSizeH_, imgSizeW_, outputH_, outputW_);
   }
+
   return layerSize;
```
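The change in `calOutputSize()` replaces two near-identical branches with a single `setLayerSize` lambda: the loop body is shared, and the deconv case differs only in which config field seeds a missing frame size (`output_x` vs `img_size`) and which direction the size formula runs (`imageSize` vs `outputSize`). A minimal Python sketch of that control flow, with illustrative names that are not taken from the patch:

```python
import math

# Rough rendering of the new setLayerSize logic: one shared body, with the
# conv formula or its inverse chosen by is_deconv. (Illustrative sketch only.)
def cal_layer_size(is_deconv, in_h, in_w, filter_size, padding, stride, num_filters):
    if is_deconv:
        # imageSize(): a transposed conv grows its input back to image size.
        grow = lambda x: (x - 1) * stride + filter_size - 2 * padding
    else:
        # outputSize(): an ordinary conv shrinks the image (caffe mode, floor).
        grow = lambda x: 1 + (2 * padding + x - filter_size) // stride
    out_h, out_w = grow(in_h), grow(in_w)
    return out_h, out_w, out_h * out_w * num_filters  # frame height, width, layer size

print(cal_layer_size(False, 5, 5, 4, 0, 1, 3))  # conv:   (2, 2, 12)
print(cal_layer_size(True, 2, 2, 4, 0, 1, 3))   # deconv: (5, 5, 75)
```

In the real layer the swap happens through the lambda's reference parameters: `setLayerSize(outputH_, outputW_, imgSizeH_, imgSizeW_)` for the deconv case, and the same call with the argument order reversed for a plain convolution.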
paddle/gserver/layers/ConvBaseLayer.h

```diff
@@ -78,8 +78,6 @@ protected:
   /// of output size.
   bool caffeMode_;
 
 public:
   explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {}
```
paddle/gserver/layers/ExpandConvBaseLayer.cpp

```diff
@@ -31,14 +31,14 @@ bool ExpandConvBaseLayer::init(const LayerMap &layerMap,
    * convTrans, and in other functions too.
    * */
   int channel;
-  int nf;
+  int numFilters;
   /* Initialize the projection */
   for (auto &inputConfig : config_.inputs()) {
     const ConvConfig &conf = inputConfig.conv_conf();
-    nf = (!isDeconv_) ? numFilters_ : conf.channels();
-    subM_.push_back(nf / conf.groups());
+    numFilters = isDeconv_ ? conf.channels() : numFilters_;
+    subM_.push_back(numFilters / conf.groups());
     subN_.push_back(conf.output_x() * conf.output_x());
-    channel = (!isDeconv_) ? conf.channels() : numFilters_;
+    channel = isDeconv_ ? numFilters_ : conf.channels();
     subK_.push_back(channel * conf.filter_size() * conf.filter_size() / conf.groups());
     /* Consistent caffe mode for multiple input */
@@ -99,7 +99,7 @@ void ExpandConvBaseLayer::addUnsharedBias() {
 void ExpandConvBaseLayer::expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx) {
-  int channel = (!isDeconv_) ? channels_[inIdx] : numFilters_;
+  int channel = isDeconv_ ? numFilters_ : channels_[inIdx];
 
   resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]);
   real *imgData = image->getData() + startIdx * image->getWidth();
@@ -122,10 +122,10 @@ void ExpandConvBaseLayer::expandFwdOnce(MatrixPtr image, MatrixPtr out,
   expandOneFrame(image, startIdx, inIdx);
 
-  int nf = (!isDeconv_) ? numFilters_ : channels_[inIdx];
+  int numFilters = isDeconv_ ? channels_[inIdx] : numFilters_;
 
-  real *outData = out->getData() + startIdx * subN * nf;
+  real *outData = out->getData() + startIdx * subN * numFilters;
   real *wgtData = weights_[inIdx]->getW()->getData();
   real *expInData = expandInput_->getData();
@@ -147,7 +147,7 @@ void ExpandConvBaseLayer::expandFwdOnce(MatrixPtr image, MatrixPtr out,
 void ExpandConvBaseLayer::bpropActs(MatrixPtr out, MatrixPtr image, int inpIdx) {
-  int channel = (!isDeconv_) ? channels_[inpIdx] : numFilters_;
+  int channel = isDeconv_ ? numFilters_ : channels_[inpIdx];
 
   int subM = subM_[inpIdx];
   int subN = subN_[inpIdx];
```
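All four hunks in this file make the same point: for a deconv layer, the layer's `numFilters_` plays the image-side channel role and `conf.channels()` plays the filter role, so every `(!isDeconv_) ? a : b` ternary is rewritten as the clearer `isDeconv_ ? b : a`, and `nf` is spelled out as `numFilters`. As I read it, `subM_`, `subN_`, and `subK_` are the per-group GEMM block sizes used by the expand (im2col) path; a hedged sketch of how they are derived, with illustrative names:

```python
# Rough sketch of the per-group GEMM block sizes set up in ExpandConvBaseLayer::init().
# For a deconv layer the roles of the input channels and the layer's filters swap,
# which is exactly what the rewritten ternaries in this diff express.
def gemm_block_shapes(is_deconv, conf_channels, layer_num_filters,
                      filter_size, output_x, groups):
    num_filters = conf_channels if is_deconv else layer_num_filters
    channel = layer_num_filters if is_deconv else conf_channels
    sub_m = num_filters // groups                          # rows of each weight block
    sub_n = output_x * output_x                            # one column per conv output position
    sub_k = channel * filter_size * filter_size // groups  # shared inner (im2col) dimension
    return sub_m, sub_k, sub_n

# A deconv layer with 16 output filters over a 3-channel input, 4x4 kernel,
# and a 2x2 conv-side feature map:
print(gemm_block_shapes(True, conf_channels=3, layer_num_filters=16,
                        filter_size=4, output_x=2, groups=1))  # (3, 256, 4)
```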
paddle/gserver/tests/test_ConvTrans.cpp

```diff
@@ -189,58 +189,55 @@ void doOneConvtTest(size_t imgSize, size_t output_x, size_t stride,
 }
 
 TEST(Layer, convTransLayerFwd2) {
-  size_t imgSize, output_x, stride, padding, filter_size;
   MatrixPtr result;
-
-  imgSize = 5;
-  output_x = 1;
-  stride = 1;
-  padding = 0;
-  filter_size = 5;
-  result = Matrix::create(1, imgSize * imgSize, false, false);
+  result = Matrix::create(1, 5 * 5, false, false);
   result->zeroMem();
   result->add(1.0);
-  doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result);
+  doOneConvtTest(/* imgSize */ 5,
+                 /* output_x */ 1,
+                 /* stride */ 1,
+                 /* padding */ 0,
+                 /* filter_size */ 5,
+                 result);
 
-  imgSize = 5;
-  output_x = 2;
-  stride = 1;
-  padding = 0;
-  filter_size = 4;
   float resultData[] = {1, 2, 2, 2, 1,
                         2, 4, 4, 4, 2,
                         2, 4, 4, 4, 2,
                         2, 4, 4, 4, 2,
                         1, 2, 2, 2, 1};
-  result = Matrix::create(resultData, 1, imgSize * imgSize, false, false);
-  doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result);
+  result->setData(resultData);
+  doOneConvtTest(/* imgSize */ 5,
+                 /* output_x */ 2,
+                 /* stride */ 1,
+                 /* padding */ 0,
+                 /* filter_size */ 4,
+                 result);
 
-  imgSize = 5;
-  output_x = 2;
-  stride = 2;
-  padding = 1;
-  filter_size = 5;
   float resultData2[] = {1, 2, 2, 2, 1,
                          2, 4, 4, 4, 2,
                          2, 4, 4, 4, 2,
                          2, 4, 4, 4, 2,
                          1, 2, 2, 2, 1};
-  result = Matrix::create(resultData2, 1, imgSize * imgSize, false, false);
-  doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result);
+  result->setData(resultData2);
+  doOneConvtTest(/* imgSize */ 5,
+                 /* output_x */ 2,
+                 /* stride */ 2,
+                 /* padding */ 1,
+                 /* filter_size */ 5,
+                 result);
 
-  imgSize = 5;
-  output_x = 2;
-  stride = 2;
-  padding = 0;
-  filter_size = 3;
   float resultData3[] = {1, 1, 2, 1, 1,
                          1, 1, 2, 1, 1,
                          2, 2, 4, 2, 2,
                          1, 1, 2, 1, 1,
                          1, 1, 2, 1, 1};
-  result = Matrix::create(resultData3, 1, imgSize * imgSize, false, false);
-  doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result);
+  result->setData(resultData3);
+  doOneConvtTest(/* imgSize */ 5,
+                 /* output_x */ 2,
+                 /* stride */ 2,
+                 /* padding */ 0,
+                 /* filter_size */ 3,
+                 result);
 }
 
 int main(int argc, char** argv) {
   testing::InitGoogleTest(&argc, argv);
```
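The rewritten `convTransLayerFwd2` feeds an all-ones feature map through a transposed convolution whose filter is effectively all ones, so each expected entry is just the number of filter windows that cover that pixel; the first case (`output_x = 1`, `filter_size = 5`) covers every pixel exactly once, which is why its expected matrix is filled with `1.0`. A small verification sketch, not part of the test, that reproduces the hard-coded patterns:

```python
# Recompute the expected maps used in convTransLayerFwd2: scatter an all-ones
# output_x*output_x map through an all-ones filter and count how many windows
# touch each image pixel. (Verification sketch only.)
def deconv_ones(img_size, output_x, stride, padding, filter_size):
    img = [[0] * img_size for _ in range(img_size)]
    for oy in range(output_x):
        for ox in range(output_x):
            for fy in range(filter_size):
                for fx in range(filter_size):
                    y, x = oy * stride + fy - padding, ox * stride + fx - padding
                    if 0 <= y < img_size and 0 <= x < img_size:
                        img[y][x] += 1
    return img

# (output_x=2, stride=1, padding=0, filter_size=4) gives resultData's
# 1 2 2 2 1 / 2 4 4 4 2 ... pattern:
for row in deconv_ones(5, 2, 1, 0, 4):
    print(row)
# (2, 2, 1, 5) reproduces resultData2 and (2, 2, 0, 3) reproduces resultData3.
```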
paddle/gserver/tests/test_LayerGrad.cpp

```diff
@@ -351,12 +351,10 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) {
 TEST(Layer, convTransLayer) {
   testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ false);
-/*
 #ifndef PADDLE_ONLY_CPU
-  testConvLayer("exconv", trans= false, useGpu= true);
-  testConvLayer("cudnn_conv", trans= false, useGpu= true);
+  testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ true);
+  // testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true);
 #endif
-*/
 }
 
 TEST(Layer, blockExpandLayer) {
```
python/paddle/trainer/config_parser.py

```diff
@@ -1082,7 +1082,11 @@ def parse_norm(norm, input_layer_name, norm_conf):
     else:
         norm_conf.scale /= norm.size ** 2
 
-def parse_conv(conv, input_layer_name, conv_conf):
+'''
+caffe_mode: compute the output size using floor instead of ceil,
+which is consistent of caffe and CuDNN's convention.
+'''
+def parse_conv(conv, input_layer_name, conv_conf, trans=False):
     conv_conf.filter_size = conv.filter_size
     conv_conf.filter_size_y = conv.filter_size_y
     conv_conf.channels = conv.channels
@@ -1094,6 +1098,7 @@ def parse_conv(conv, input_layer_name, conv_conf):
     conv_conf.filter_channels = conv.channels / conv.groups
     conv_conf.caffe_mode = conv.caffe_mode
+    if not trans:
         img_pixels = g_layer_map[input_layer_name].size / conv.channels
         print('channels=%d size=%d' % (conv.channels,
                                        g_layer_map[input_layer_name].size))
@@ -1102,23 +1107,15 @@ def parse_conv(conv, input_layer_name, conv_conf):
                       ("Input layer %s: Incorrect input image size %d for input "
                        + "image pixels %d")
                       % (input_layer_name, conv_conf.img_size, img_pixels))
-    if conv.caffe_mode:
-        conv_conf.output_x = \
-            1 + int(math.floor((2 * conv.padding + conv_conf.img_size \
-                - conv.filter_size) / float(conv.stride)))
-    else:
-        conv_conf.output_x = \
-            1 + int(math.ceil((2 * conv.padding + conv_conf.img_size \
-                - conv.filter_size) / float(conv.stride)))
-
-def parse_conv_trans(conv, input_layer_name, conv_conf, num_filters):
-    conv_conf.filter_size = conv.filter_size
-    conv_conf.filter_size_y = conv.filter_size_y
-    conv_conf.channels = conv.channels
-    conv_conf.padding = conv.padding
-    conv_conf.padding_y = conv.padding_y
-    conv_conf.stride = conv.stride
-    conv_conf.stride_y = conv.stride_y
-    conv_conf.groups = conv.groups
-    conv_conf.filter_channels = num_filters / conv.groups
-    conv_conf.caffe_mode = conv.caffe_mode
+        conv_conf.output_x = cnn_output_size(
+            conv_conf.img_size, conv_conf.filter_size,
+            conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode)
+    else:
         outputSize = g_layer_map[input_layer_name].size / conv.channels
         print('channels=%d size=%d' % (conv.channels,
                                        g_layer_map[input_layer_name].size))
@@ -1136,7 +1133,6 @@ def parse_conv_trans(conv, input_layer_name, conv_conf, num_filters):
             (conv_conf.output_x - 2) * conv.stride \
             + conv.filter_size - 2 * conv.padding + 1
 
 def parse_block_expand(block_expand, input_layer_name, block_expand_conf):
     block_expand_conf.channels = block_expand.channels
     block_expand_conf.stride_x = block_expand.stride_x
@@ -1685,10 +1681,11 @@ class ConvTransLayerBase(LayerBase):
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
-            parse_conv_trans(
+            parse_conv(
                 self.inputs[input_index].conv,
                 input_layer.name,
-                self.config.inputs[input_index].conv_conf, num_filters)
+                self.config.inputs[input_index].conv_conf,
+                num_filters, trans=True)
             conv_conf = self.config.inputs[input_index].conv_conf
             psize = self.calc_parameter_size(conv_conf)
             print("output size for %s is %d " % (name, conv_conf.output_x))
```
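With `parse_conv_trans` folded into `parse_conv`, the forward branch computes `output_x` from `img_size` (floor under `caffe_mode`, ceil otherwise), and the `trans` branch inverts that arithmetic to recover `img_size` from `output_x`, as the tail of the removed `parse_conv_trans` shows. A hedged sketch of the two formulas and their round trip; the helper names here are illustrative, while the patch itself refers to `cnn_output_size` for the forward case:

```python
import math

# Forward conv output size, following the caffe_mode branch in config_parser.py.
def output_size(img_size, filter_size, padding, stride, caffe_mode=True):
    val = (2 * padding + img_size - filter_size) / float(stride)
    return 1 + int(math.floor(val) if caffe_mode else math.ceil(val))

# Inverse used for the transposed (deconv) layer, following the hunk's arithmetic.
def image_size(output_x, filter_size, padding, stride, caffe_mode=True):
    if caffe_mode:
        return (output_x - 1) * stride + filter_size - 2 * padding
    return (output_x - 2) * stride + filter_size - 2 * padding + 1

# Round trip for one of the test_ConvTrans.cpp cases: 2 -> 5 -> 2
assert image_size(2, 3, 0, 2) == 5
assert output_size(5, 3, 0, 2) == 2
```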
python/paddle/trainer_config_helpers/layers.py

```diff
@@ -36,7 +36,7 @@ __all__ = ["full_matrix_projection", "AggregateLevel", "ExpandLevel",
            "pooling_layer", "lstmemory", "last_seq", "first_seq", "cos_sim",
            "hsigmoid", "conv_projection",
            "regression_cost", 'classification_cost', "LayerOutput",
-           'img_conv_layer', 'img_convTrans_layer', 'img_pool_layer', 'batch_norm_layer',
+           'img_conv_layer', 'img_pool_layer', 'batch_norm_layer',
            'img_cmrnorm_layer', 'addto_layer', 'concat_layer',
            'lstm_step_layer', 'recurrent_group', 'memory', 'StaticInput',
            'expand_layer', 'scaling_layer',
```