PaddlePaddle / Paddle

Commit 090c974e
Author: wangyang59
Authored: Feb 24, 2017
Parent: 07c1ea25

completed implementation of cudnn_convt convTransProjection and convTransOperator

Showing 4 changed files with 148 additions and 22 deletions (+148 -22)
python/paddle/trainer/config_parser.py    +20 -7
python/paddle/trainer_config_helpers/layers.py    +4 -3
python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr    +4 -0
python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr    +120 -12
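For orientation, here is a minimal usage sketch (not part of this commit) of how the new type is requested through img_conv_layer. The layer name and sizes are illustrative assumptions, not values from the repository's test configs:

    from paddle.trainer_config_helpers import *

    # hypothetical 32x32 single-channel input
    img = data_layer(name='img', size=32 * 32)

    # With this change, a transposed convolution may explicitly request the
    # cuDNN backend; previously only "exconvt" passed the layer_type assertion.
    convt = img_conv_layer(
        input=img,
        filter_size=3,
        num_filters=64,
        num_channels=1,
        stride=2,
        padding=1,
        trans=True,
        layer_type="cudnn_convt")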
python/paddle/trainer/config_parser.py
@@ -726,7 +726,7 @@ class ConvProjection(ConvBaseProjection):
                  **xargs):
         super(ConvProjection, self).__init__(input_layer_name, **xargs)
-        parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
+        parse_conv(conv_conf, self.input_layer_name, self.proj_conf.conv_conf,
                    num_filters)
         self.proj_conf.output_size = self.proj_conf.conv_conf.output_x * \
                                      self.proj_conf.conv_conf.output_y * \
@@ -746,7 +746,7 @@ class ConvTransProjection(ConvBaseProjection):
-        parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf,
+        parse_conv(conv_conf, self.input_layer_name, self.proj_conf.conv_conf,
                    num_filters, trans=True)
@@ -1834,7 +1834,16 @@ class ConvTransLayerBase(LayerBase):
         use_gpu = int(g_command_config_args.get("use_gpu", 0))
         parallel_nn = int(g_command_config_args.get("parallel_nn", 0))
-        # cudnn_convt has not been implemented so use exconvt only
-        self.layer_type = "exconvt"
+        # Automatically select cudnn_type for GPU and exconvt for CPU
+        # if set type=exconvt, but still reserve the way user specify
+        # exconvt or cudnn_convt manually.
+        if self.layer_type == "cudnn_convt":
+            config_assert(use_gpu, "cudnn_convt only support GPU")
+
+        if (use_gpu == 1 and self.layer_type != "exconvt" and
+                (parallel_nn == 0 or self.config.device > -1)):
+            self.layer_type = "cudnn_convt"
+        else:
+            self.layer_type = "exconvt"
 
         # need to specify layer in config
         self.config.type = self.layer_type
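The backend selection added above can be restated as a small standalone rule; the sketch below is illustrative only and is not code from the repository:

    def select_convt_backend(requested_type, use_gpu, parallel_nn, device):
        # An explicit "cudnn_convt" request requires GPU; otherwise cuDNN is
        # picked automatically on GPU unless the user pinned "exconvt" or the
        # layer is placed on a CPU device while parallel_nn is enabled.
        if requested_type == "cudnn_convt":
            assert use_gpu, "cudnn_convt only support GPU"
        if use_gpu == 1 and requested_type != "exconvt" and (
                parallel_nn == 0 or device > -1):
            return "cudnn_convt"
        return "exconvt"

    assert select_convt_backend(None, use_gpu=1, parallel_nn=0, device=-1) == "cudnn_convt"
    assert select_convt_backend("exconvt", use_gpu=1, parallel_nn=0, device=-1) == "exconvt"
    assert select_convt_backend(None, use_gpu=0, parallel_nn=0, device=-1) == "exconvt"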
@@ -1852,10 +1861,9 @@ class ConvTransLayerBase(LayerBase):
                 trans=True)
             conv_conf = self.config.inputs[input_index].conv_conf
             psize = self.calc_parameter_size(conv_conf)
-            print("output size for %s is %d " % (name, conv_conf.output_x))
             self.create_input_parameter(input_index, psize)
-            self.set_layer_size(
-                (conv_conf.img_size**2) * self.config.num_filters)
+            self.set_cnn_layer(name, conv_conf.img_size_y, conv_conf.img_size,
+                               self.config.num_filters)
 
         psize = self.config.size
         if shared_biases:
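The switch from set_layer_size to set_cnn_layer sizes the layer as height * width * num_filters instead of assuming a square image. As a worked check using numbers that also appear in the projections.protostr change below, a 63 x 63 transposed-convolution output with 64 filters gives 63 * 63 * 64 = 254016, the output_size recorded for the "convt" projection there:

    # worked size check (values taken from projections.protostr below)
    img_size, img_size_y, num_filters = 63, 63, 64
    assert img_size_y * img_size * num_filters == 254016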
@@ -1872,6 +1880,11 @@ class ConvTransLayer(ConvTransLayerBase):
     layer_type = 'exconvt'
 
 
+@config_layer('cudnn_convt')
+class ConvTransLayer(ConvTransLayerBase):
+    layer_type = 'cudnn_convt'
+
+
 @config_layer('norm')
 class NormLayer(LayerBase):
     def __init__(self, name, inputs, **xargs):
python/paddle/trainer_config_helpers/layers.py
@@ -2046,8 +2046,9 @@ def img_conv_layer(input,
     :param trans: true if it is a convTransLayer, false if it is a convLayer
     :type trans: bool
     :param layer_type: specify the layer_type, default is None. If trans=True,
-                       layer_type has to be "exconvt", otherwise layer_type
-                       has to be either "exconv" or "cudnn_conv"
+                       layer_type has to be "exconvt" or "cudnn_convt",
+                       otherwise layer_type has to be either "exconv" or
+                       "cudnn_conv"
     :type layer_type: String
     :return: LayerOutput object.
     :rtype: LayerOutput
@@ -2087,7 +2088,7 @@ def img_conv_layer(input,
     if layer_type:
         if trans:
-            assert layer_type in ["exconvt"]
+            assert layer_type in ["exconvt", "cudnn_convt"]
         else:
             assert layer_type in ["exconv", "cudnn_conv"]
         lt = layer_type
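As a quick illustration of what the updated assertion accepts (a sketch mirroring the check above, not repository code):

    def check_layer_type(layer_type, trans):
        if trans:
            assert layer_type in ["exconvt", "cudnn_convt"]
        else:
            assert layer_type in ["exconv", "cudnn_conv"]

    check_layer_type("cudnn_convt", trans=True)   # newly accepted by this commit
    check_layer_type("exconvt", trans=True)       # accepted before and after
    check_layer_type("cudnn_conv", trans=False)   # unchanged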
python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr
@@ -33,6 +33,8 @@ layers {
   bias_parameter_name: "___conv_0__.wbias"
   num_filters: 64
   shared_biases: true
+  height: 256
+  width: 256
 }
 layers {
   name: "__batch_norm_0__"
@@ -58,6 +60,8 @@ layers {
   }
   bias_parameter_name: "___batch_norm_0__.wbias"
   moving_average_fraction: 0.9
+  height: 256
+  width: 256
 }
 layers {
   name: "__crmnorm_0__"
python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr
@@ -154,13 +154,38 @@ layers {
   inputs {
     input_layer_name: "img"
   }
+  inputs {
+    input_layer_name: "img"
+    proj_conf {
+      type: "conv"
+      name: "___mixed_6__.w1"
+      input_size: 1024
+      output_size: 57600
+      conv_conf {
+        filter_size: 3
+        channels: 1
+        stride: 1
+        padding: 0
+        groups: 1
+        filter_channels: 1
+        output_x: 30
+        img_size: 32
+        caffe_mode: true
+        filter_size_y: 3
+        padding_y: 0
+        stride_y: 1
+        output_y: 30
+        img_size_y: 32
+      }
+    }
+  }
   inputs {
     input_layer_name: "filter"
   }
   operator_confs {
     type: "conv"
     input_indices: 0
-    input_indices: 1
+    input_indices: 2
     input_sizes: 1024
     input_sizes: 576
     output_size: 57600
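The added proj_conf is consistent with the ConvProjection rule in config_parser.py above (output_size = output_x * output_y * num_filters); with output_x = output_y = 30 and output_size = 57600, the projection is implicitly configured with 64 filters:

    # quick arithmetic check; the filter count 64 is inferred, not stored in conv_conf
    assert 30 * 30 * 64 == 57600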
@@ -186,38 +211,110 @@ layers {
 layers {
   name: "__mixed_7__"
   type: "mixed"
+  size: 254016
+  active_type: ""
+  inputs {
+    input_layer_name: "img"
+  }
+  inputs {
+    input_layer_name: "img"
+    proj_conf {
+      type: "convt"
+      name: "___mixed_7__.w1"
+      input_size: 1024
+      output_size: 254016
+      conv_conf {
+        filter_size: 3
+        channels: 1
+        stride: 2
+        padding: 1
+        groups: 1
+        filter_channels: 64
+        output_x: 32
+        img_size: 63
+        caffe_mode: true
+        filter_size_y: 3
+        padding_y: 1
+        stride_y: 2
+        output_y: 32
+        img_size_y: 63
+      }
+    }
+  }
+  inputs {
+    input_layer_name: "filter"
+  }
+  operator_confs {
+    type: "convt"
+    input_indices: 0
+    input_indices: 2
+    input_sizes: 1024
+    input_sizes: 576
+    output_size: 254016
+    conv_conf {
+      filter_size: 3
+      channels: 1
+      stride: 2
+      padding: 1
+      groups: 1
+      filter_channels: 64
+      output_x: 32
+      img_size: 63
+      caffe_mode: true
+      filter_size_y: 3
+      padding_y: 1
+      stride_y: 2
+      output_y: 32
+      img_size_y: 63
+    }
+    num_filters: 64
+  }
+}
+layers {
+  name: "__mixed_8__"
+  type: "mixed"
   size: 100
   active_type: ""
   inputs {
     input_layer_name: "__mixed_4__"
-    input_parameter_name: "___mixed_7__.w0"
+    input_parameter_name: "___mixed_8__.w0"
     proj_conf {
       type: "fc"
-      name: "___mixed_7__.w0"
+      name: "___mixed_8__.w0"
       input_size: 300
       output_size: 100
     }
   }
   inputs {
     input_layer_name: "__mixed_5__"
-    input_parameter_name: "___mixed_7__.w1"
+    input_parameter_name: "___mixed_8__.w1"
     proj_conf {
       type: "trans_fc"
-      name: "___mixed_7__.w1"
+      name: "___mixed_8__.w1"
       input_size: 100
       output_size: 100
     }
   }
   inputs {
     input_layer_name: "__mixed_6__"
-    input_parameter_name: "___mixed_7__.w2"
+    input_parameter_name: "___mixed_8__.w2"
     proj_conf {
       type: "fc"
-      name: "___mixed_7__.w2"
+      name: "___mixed_8__.w2"
       input_size: 57600
       output_size: 100
     }
   }
+  inputs {
+    input_layer_name: "__mixed_7__"
+    input_parameter_name: "___mixed_8__.w3"
+    proj_conf {
+      type: "fc"
+      name: "___mixed_8__.w3"
+      input_size: 254016
+      output_size: 100
+    }
+  }
   drop_rate: 0.5
 }
 parameters {
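The new __mixed_7__ layer above is a mixed layer built from a transposed-convolution projection plus a transposed-convolution operator over the img and filter inputs. A rough sketch of a config that could produce such entries is shown below; it assumes conv_projection and conv_operator take a trans flag as part of this change series, and the argument names are assumptions rather than code taken from the repository's test configs:

    # hypothetical trainer_config_helpers snippet (sketch only)
    img = data_layer(name='img', size=1024)      # 32 x 32 x 1 image
    filt = data_layer(name='filter', size=576)   # 3 x 3 x 64 filter weights

    proj = conv_projection(
        input=img, filter_size=3, num_filters=64, num_channels=1,
        stride=2, padding=1, trans=True)         # serialised as proj_conf type "convt"
    op = conv_operator(
        img=img, filter=filt, filter_size=3, num_filters=64, num_channels=1,
        stride=2, padding=1, trans=True)         # serialised as operator_confs type "convt"

    mixed = mixed_layer(size=63 * 63 * 64,       # 254016, matching the protostr
                        input=[proj, op])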
@@ -281,7 +378,7 @@ parameters {
   initial_smart: true
 }
 parameters {
-  name: "___mixed_7__.w0"
+  name: "___mixed_8__.w0"
   size: 30000
   initial_mean: 0.0
   initial_std: 0.057735026919
@@ -291,7 +388,7 @@ parameters {
   initial_smart: true
 }
 parameters {
-  name: "___mixed_7__.w1"
+  name: "___mixed_8__.w1"
   size: 10000
   initial_mean: 0.0
   initial_std: 0.1
@@ -301,7 +398,7 @@ parameters {
   initial_smart: true
 }
 parameters {
-  name: "___mixed_7__.w2"
+  name: "___mixed_8__.w2"
   size: 5760000
   initial_mean: 0.0
   initial_std: 0.00416666666667
@@ -310,10 +407,20 @@ parameters {
   initial_strategy: 0
   initial_smart: true
 }
+parameters {
+  name: "___mixed_8__.w3"
+  size: 25401600
+  initial_mean: 0.0
+  initial_std: 0.00198412698413
+  dims: 254016
+  dims: 100
+  initial_strategy: 0
+  initial_smart: true
+}
 input_layer_names: "test"
 input_layer_names: "img"
 input_layer_names: "filter"
-output_layer_names: "__mixed_7__"
+output_layer_names: "__mixed_8__"
 sub_models {
   name: "root"
   layer_names: "test"
@@ -328,10 +435,11 @@ sub_models {
   layer_names: "filter"
   layer_names: "__mixed_6__"
   layer_names: "__mixed_7__"
+  layer_names: "__mixed_8__"
   input_layer_names: "test"
   input_layer_names: "img"
   input_layer_names: "filter"
-  output_layer_names: "__mixed_7__"
+  output_layer_names: "__mixed_8__"
   is_recurrent_layer_group: false
 }