机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 8b9e678d
Authored Jul 26, 2017 by caoying03
fix dropout and clipping settings in layer helpers.
Parent: eff17a68
Changed files: 2
Showing 2 changed files with 12 additions and 21 deletions (+12, -21)
python/paddle/trainer_config_helpers/attrs.py    +1  -1
python/paddle/trainer_config_helpers/layers.py   +11 -20
python/paddle/trainer_config_helpers/attrs.py

@@ -272,7 +272,7 @@ class ExtraLayerAttribute(object):
         for key in self.attr:
             if not hasattr(self, 'can_%s' % key) or \
                     not getattr(self, 'can_%s' % key):
-                raise NotImplementedError("Layer %s can not support %s" %
+                raise NotImplementedError("Layer %s does not support %s" %
                                           (layer_name, key))

     @staticmethod
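For context, the reworded message comes from ExtraLayerAttribute.check, which accepts an extra attribute only when the layer's layer_support(...) decorator has set the matching can_<attr> flag. Below is a minimal, self-contained sketch of that check pattern; AttrChecker is a hypothetical stand-in for ExtraLayerAttribute, not Paddle's actual class.

# Hypothetical sketch of the can_<attr> validation that produces the
# message changed above.
class AttrChecker(object):
    def __init__(self, **attrs):
        self.attr = attrs

    def check(self, layer_name):
        for key in self.attr:
            if not hasattr(self, 'can_%s' % key) or \
                    not getattr(self, 'can_%s' % key):
                raise NotImplementedError("Layer %s does not support %s" %
                                          (layer_name, key))

checker = AttrChecker(drop_rate=0.5)
checker.can_drop_rate = False   # a layer whose layer_support() omits DROPOUT
try:
    checker.check("lstmemory")
except NotImplementedError as e:
    print(e)   # Layer lstmemory does not support drop_rate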
python/paddle/trainer_config_helpers/layers.py

@@ -865,7 +865,7 @@ def data_layer(name, size, height=None, width=None, layer_attr=None):
 @wrap_name_default("embedding")
 @wrap_param_attr_default()
-@layer_support(ERROR_CLIPPING)
+@layer_support(ERROR_CLIPPING, DROPOUT)
 def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None):
     """
     Define a embedding Layer.
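With DROPOUT added to embedding_layer's layer_support list, dropout can now be requested on the embedding output through the layer's extra attribute, alongside the error clipping that was already allowed. A hedged usage sketch follows; the sizes and rates are illustrative, and ExtraAttr is the trainer_config_helpers alias for ExtraLayerAttribute.

from paddle.trainer_config_helpers import *

# Word ids fed into an embedding table; dropout and error clipping are
# requested through the layer's extra attribute.
words = data_layer(name='word_ids', size=10000)
emb = embedding_layer(
    input=words,
    size=128,
    layer_attr=ExtraAttr(error_clipping_threshold=10.0,  # previously supported
                         drop_rate=0.5))                  # newly allowed by this commit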
@@ -1320,7 +1320,7 @@ def pooling_layer(input,
 @wrap_act_default(param_names=['gate_act'], act=SigmoidActivation())
 @wrap_act_default(param_names=["act", 'state_act'], act=TanhActivation())
 @wrap_name_default("lstmemory")
-@layer_support(DROPOUT)
+@layer_support()
 def lstmemory(input,
               name=None,
               size=None,
@@ -1429,7 +1429,7 @@ def lstmemory(input,
 @wrap_act_default(param_names=['gate_act'], act=SigmoidActivation())
 @wrap_act_default(param_names=["act"], act=TanhActivation())
 @wrap_name_default("gru")
-@layer_support(DROPOUT)
+@layer_support()
 def grumemory(input,
               size=None,
               name=None,
@@ -1793,7 +1793,7 @@ def repeat_layer(input,
 @wrap_name_default("seqreshape")
 @wrap_act_default(act=IdentityActivation())
 @wrap_bias_attr_default(has_bias=False)
-@layer_support()
+@layer_support(ERROR_CLIPPING, DROPOUT)
 def seq_reshape_layer(input,
                       reshape_size,
                       act=None,
@@ -2703,7 +2703,7 @@ def img_cmrnorm_layer(input,
     default_factory=lambda _: ParamAttr(initial_mean=1.0, initial_std=0.))
 @wrap_act_default(act=ReluActivation())
 @wrap_name_default("batch_norm")
-@layer_support(DROPOUT)
+@layer_support(DROPOUT, ERROR_CLIPPING)
 def batch_norm_layer(input,
                      act=None,
                      name=None,
@@ -2783,15 +2783,6 @@ def batch_norm_layer(input,
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
-    if not isinstance(act, ReluActivation):
-        logger.log(logging.WARN,
-                   "%s is not recommend for batch normalization's activation, "
-                   "maybe the relu is better" % act.name)
-
-    if not isinstance(input.activation, LinearActivation):
-        logger.log(logging.WARN,
-                   "The activation should be inside batch normalization, the "
-                   "previous layer's activation may be Linear")
     if num_channels is None:
         if input.num_filters is not None:
@@ -2861,7 +2852,7 @@ def sum_to_one_norm_layer(input, name=None, layer_attr=None):
 @wrap_name_default("addto")
 @wrap_act_default(act=LinearActivation())
 @wrap_bias_attr_default(has_bias=False)
-@layer_support(DROPOUT)
+@layer_support(DROPOUT, ERROR_CLIPPING)
 def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None):
     """
     AddtoLayer.
@@ -2940,7 +2931,7 @@ def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None):
 @wrap_act_default(act=IdentityActivation())
 @wrap_name_default("concat")
-@layer_support()
+@layer_support(DROPOUT, ERROR_CLIPPING)
 def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
     """
     Concat all input vector into one huge vector.
@@ -3024,7 +3015,7 @@ def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
 @wrap_name_default("seqconcat")
 @wrap_act_default(act=IdentityActivation())
 @wrap_bias_attr_default(has_bias=False)
-@layer_support()
+@layer_support(DROPOUT, ERROR_CLIPPING)
 def seq_concat_layer(a, b, act=None, name=None, layer_attr=None,
                      bias_attr=None):
     """
@@ -3177,7 +3168,7 @@ def memory(name,
 @wrap_act_default(param_names=['state_act'], act=TanhActivation())
 @wrap_act_default(act=TanhActivation())
 @wrap_name_default('lstm_step')
-@layer_support(ERROR_CLIPPING, DROPOUT)
+@layer_support()
 def lstm_step_layer(input,
                     state,
                     size=None,
@@ -4480,7 +4471,7 @@ def tensor_layer(a,
 @wrap_param_attr_default()
 @wrap_bias_attr_default()
 @wrap_act_default()
-@layer_support()
+@layer_support(DROPOUT, ERROR_CLIPPING)
 def selective_fc_layer(input,
                        size,
                        select=None,
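Taken together, the layers.py changes stop lstmemory, grumemory and lstm_step_layer from advertising DROPOUT (and, for lstm_step_layer, ERROR_CLIPPING), while the feed-forward helpers above gain those flags in their layer_support decorators. A hedged sketch of configurations that these decorators now declare as valid; layer sizes and thresholds are illustrative, and ExtraAttr is assumed to be the ExtraLayerAttribute alias.

from paddle.trainer_config_helpers import *

# One extra-attribute object reused across several layers.
attr = ExtraAttr(drop_rate=0.3, error_clipping_threshold=10.0)

x = data_layer(name='x', size=256)
h1 = fc_layer(input=x, size=128)
h2 = fc_layer(input=x, size=128)

merged = addto_layer(input=[h1, h2], layer_attr=attr)   # gains ERROR_CLIPPING in this commit
joined = concat_layer(input=[h1, h2], layer_attr=attr)  # gains DROPOUT and ERROR_CLIPPING
normed = batch_norm_layer(input=h1, layer_attr=attr)    # gains ERROR_CLIPPING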