Commit 14f791f2

Authored on Aug 23, 2017 by Cao Ying; committed via GitHub on Aug 23, 2017.

Merge pull request #3606 from emailweixu/width_height

Correctly handle width and height for DataLayer and ScatterAgentLayer.

Parents: a0aa9073, 99af29e3
Showing 2 changed files with 33 additions and 13 deletions:

python/paddle/trainer/config_parser.py (+7, -4)
python/paddle/trainer_config_helpers/layers.py (+26, -9)
python/paddle/trainer/config_parser.py

@@ -338,7 +338,8 @@ def RecurrentLayerGroupWithoutOutLinksBegin(name,
         in_links_count += 1
         layer_name = MakeLayerNameInParentSubmodel(name)
         layer = g_layer_map[layer_name]
-        ScatterAgentLayer(name=name, size=layer.size)
+        ScatterAgentLayer(
+            name=name, size=layer.size, width=layer.width, height=layer.height)
         pair = g_current_submodel.in_links.add()
         pair.layer_name = layer_name
@@ -2197,8 +2198,8 @@ class MaxOutLayer(LayerBase):
         maxout_conf = self.config.inputs[0].maxout_conf
         parse_maxout(self.inputs[0].maxout, input_layer.name, maxout_conf)
         out_channels = maxout_conf.image_conf.channels / maxout_conf.groups
-        self.set_cnn_layer(name, g_layer_map[input_layer.name].height,
-                           g_layer_map[input_layer.name].width, out_channels)
+        self.set_cnn_layer(name, maxout_conf.image_conf.img_size_y,
+                           maxout_conf.image_conf.img_size, out_channels)


 @config_layer('row_conv')
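To make the effect of this hunk concrete, here is a small stand-alone sketch of the shape arithmetic it configures: the output channel count is the input channel count divided by the maxout group count, and the spatial size now comes from maxout_conf.image_conf rather than from g_layer_map. The concrete numbers below are hypothetical; only the formula is taken from the diff.

# Stand-alone sketch of the maxout shape arithmetic configured above.
# The numbers are hypothetical; the formula mirrors the diff.
channels = 128      # conv input channels (maxout_conf.image_conf.channels)
groups = 4          # maxout groups (maxout_conf.groups)
img_size_y = 14     # input height (maxout_conf.image_conf.img_size_y)
img_size = 14       # input width  (maxout_conf.image_conf.img_size)

out_channels = channels // groups   # the diff uses Python 2 `/`, i.e. floor division
print(out_channels, img_size_y, img_size)   # 32 14 14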
@@ -2405,9 +2406,11 @@ class GatherAgentLayer(LayerBase):

 @config_layer('scatter_agent')
 class ScatterAgentLayer(LayerBase):
-    def __init__(self, name, size, device=None):
+    def __init__(self, name, size, width=None, height=None, device=None):
         super(ScatterAgentLayer, self).__init__(
             name, 'scatter_agent', size, inputs=[], device=device)
+        if height and width:
+            self.set_layer_height_width(height, width)


 @config_layer('multiplex')
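For illustration only, a minimal mock of the new constructor guard. MockScatterAgentLayer and its set_layer_height_width are simplified stand-ins, not Paddle's LayerBase machinery; the guard condition itself is taken from the diff.

# Minimal mock of the guard added to ScatterAgentLayer.__init__ above.
class MockScatterAgentLayer(object):
    def __init__(self, name, size, width=None, height=None):
        self.name, self.size = name, size
        self.height = self.width = 0
        # mirror the diff: only record geometry when both dims are given
        if height and width:
            self.set_layer_height_width(height, width)

    def set_layer_height_width(self, height, width):
        self.height, self.width = height, width

agent = MockScatterAgentLayer('scatter', size=3 * 32 * 32, width=32, height=32)
print(agent.height, agent.width)  # 32 32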
python/paddle/trainer_config_helpers/layers.py

@@ -16,11 +16,13 @@ import functools
 import collections
 import inspect
 
+import paddle.trainer.config_parser as cp
 from paddle.trainer.config_parser import *
 from .activations import LinearActivation, SigmoidActivation, TanhActivation, \
     ReluActivation, IdentityActivation, SoftmaxActivation, BaseActivation
 from .evaluators import *
-from .poolings import MaxPooling, AvgPooling, BasePoolingType
+from .poolings import MaxPooling, AvgPooling, BasePoolingType, \
+    CudnnAvgPooling, CudnnMaxPooling
 from .attrs import *
 from .default_decorators import *
@@ -330,6 +332,14 @@ class LayerOutput(object):
         self.outputs = outputs
         self.reverse = reverse
 
+    @property
+    def width(self):
+        return cp.g_layer_map[self.full_name].width
+
+    @property
+    def height(self):
+        return cp.g_layer_map[self.full_name].height
+
     def set_input(self, input):
         """
         Set the input for a memory layer. Can only be used for memory layer
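The new properties look the layer up in the global layer map by its full name. A simplified stand-alone sketch of that pattern, assuming a plain dict in place of cp.g_layer_map and a tiny record class in place of the parsed layer config:

# Simplified sketch of the width/height property pattern added to LayerOutput.
class _LayerConfig(object):          # stand-in for a parsed layer config entry
    def __init__(self, width, height):
        self.width, self.height = width, height

g_layer_map = {'image': _LayerConfig(width=28, height=28)}  # stand-in for cp.g_layer_map

class MockLayerOutput(object):
    def __init__(self, full_name):
        self.full_name = full_name

    @property
    def width(self):
        return g_layer_map[self.full_name].width

    @property
    def height(self):
        return g_layer_map[self.full_name].height

out = MockLayerOutput('image')
print(out.width, out.height)  # 28 28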
@@ -911,7 +921,13 @@ def data_layer(name, size, height=None, width=None, layer_attr=None):
         width=width,
         **ExtraLayerAttribute.to_kwargs(layer_attr))
 
-    return LayerOutput(name, LayerType.DATA, size=size)
+    num_filters = None
+    if height is not None and width is not None:
+        num_filters = size / (width * height)
+        assert num_filters * width * height == size, \
+            "size=%s width=%s height=%s" % (size, width, height)
+    return LayerOutput(
+        name, LayerType.DATA, size=size, num_filters=num_filters)
 
 
 @wrap_name_default("embedding")
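The inference added to data_layer is plain arithmetic: when both height and width are given, num_filters is size divided by width*height, and the assert rejects inconsistent shapes. A minimal sketch with hypothetical numbers (the original is Python 2, where `/` on ints is floor division):

# Sketch of the num_filters inference added to data_layer; no Paddle imports.
size, height, width = 3 * 28 * 28, 28, 28    # e.g. a 3-channel 28x28 image
num_filters = size // (width * height)       # -> 3
assert num_filters * width * height == size, \
    "size=%s width=%s height=%s" % (size, width, height)
print(num_filters)  # 3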
@@ -2571,6 +2587,10 @@ def img_pool_layer(input,
         assert input.num_filters is not None
         num_channels = input.num_filters
 
+    assert type(pool_type) in [AvgPooling, MaxPooling, CudnnAvgPooling,
+                               CudnnMaxPooling], \
+        "only (Cudnn)AvgPooling, (Cudnn)MaxPooling are supported"
+
     if pool_type is None:
         pool_type = MaxPooling()
     elif isinstance(pool_type, AvgPooling):
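The added check simply restricts pool_type to the four supported pooling classes. A sketch of the pattern with dummy stand-in classes (not the real paddle.trainer_config_helpers.poolings types):

# Dummy stand-ins for the pooling classes, used only to show the type check.
class AvgPooling(object): name = 'avg'
class MaxPooling(object): name = 'max'
class CudnnAvgPooling(object): name = 'cudnn-avg-pool'
class CudnnMaxPooling(object): name = 'cudnn-max-pool'

pool_type = MaxPooling()
assert type(pool_type) in [AvgPooling, MaxPooling, CudnnAvgPooling,
                           CudnnMaxPooling], \
    "only (Cudnn)AvgPooling, (Cudnn)MaxPooling are supported"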
@@ -2580,7 +2600,6 @@ def img_pool_layer(input,
         if (
         isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)) \
         else pool_type.name
-
     pool_size_y = pool_size if pool_size_y is None else pool_size_y
     stride_y = stride if stride_y is None else stride_y
     padding_y = padding if padding_y is None else padding_y
@@ -4204,8 +4223,7 @@ def conv_operator(img,
         num_channels = img.num_filters
     assert isinstance(filter, LayerOutput)
-    if filter.size is not None:
-        filter.size = filter_size * filter_size_y * num_filters * num_channels
+    assert filter.size is not None
 
     opCls = ConvTransOperator if trans else ConvOperator
@@ -4916,7 +4934,6 @@ def maxout_layer(input, groups, num_channels=None, name=None, layer_attr=None):
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
-    assert input.layer_type == LayerType.CONV_LAYER
     assert isinstance(input.activation, LinearActivation)
     assert groups > 1
     if num_channels is None:
@@ -6219,11 +6236,11 @@ def kmax_sequence_score_layer(input, name=None, beam_size=1):
 @wrap_bias_attr_default()
 def scale_shift_layer(input, name=None, param_attr=None, bias_attr=None):
     """
     A layer applies a linear transformation to each element in each row of
     the input matrix. For each element, the layer first re-scale it and then
     adds a bias to it.
 
     This layer is very like the SlopeInterceptLayer, except the scale and
     bias are trainable.
 
     .. math::