Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
b59f35ef
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
b59f35ef
编写于
2月 27, 2017
作者:
D
dangqingqing
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
follow some comments
上级
aa92f0b6
变更
2
隐藏空白更改
内联
并排
Showing
2 changed file
with
21 addition
and
54 deletion
+21
-54
python/paddle/v2/layer.py
python/paddle/v2/layer.py
+16
-45
python/paddle/v2/tests/test_layer.py
python/paddle/v2/tests/test_layer.py
+5
-9
未找到文件。
python/paddle/v2/layer.py
浏览文件 @
b59f35ef
...
...
@@ -82,27 +82,10 @@ import activation
import
attr
__all__
=
[
'parse_network'
,
'data'
,
'fc'
,
'max_id'
,
'classification_cost'
,
'cross_entropy_cost'
,
'cross_entropy_with_selfnorm_cost'
,
'regression_cost'
,
'multi_binary_label_cross_entropy_cost'
,
'rank_cost'
,
'lambda_cost'
,
'sum_cost'
,
'huber_cost'
'full_matrix_projection'
,
'trans_full_matrix_projection'
,
'table_projection'
,
'identity_projection'
,
'scaling_projection'
,
'dotmul_projection'
,
'context_projection'
,
'conv_projection'
,
'parse_network'
,
'data'
,
'fc'
,
'max_id'
,
'classification_cost'
,
'cross_entropy_cost'
,
'cross_entropy_with_selfnorm_cost'
,
'regression_cost'
,
'multi_binary_label_cross_entropy_cost'
,
'rank_cost'
,
'lambda_cost'
,
'sum_cost'
,
'huber_cost'
]
__projection_names__
=
filter
(
lambda
x
:
x
.
endswith
(
'_projection'
),
...
...
@@ -167,7 +150,7 @@ def __convert_to_v2__(method_name, name_prefix=None, parent_names=None):
wrapper
=
None
class
V2LayerImpl
(
Layer
):
def
__init__
(
self
,
name
=
None
,
**
kwargs
):
def
__init__
(
self
,
**
kwargs
):
parent_layers
=
dict
()
other_kwargs
=
dict
()
for
pname
in
parent_names
:
...
...
@@ -178,6 +161,7 @@ def __convert_to_v2__(method_name, name_prefix=None, parent_names=None):
if
key
not
in
parent_names
:
other_kwargs
[
key
]
=
kwargs
[
key
]
name
=
kwargs
[
'name'
]
if
kwargs
.
has_key
(
'name'
)
else
None
super
(
V2LayerImpl
,
self
).
__init__
(
name
,
parent_layers
)
self
.
__other_kwargs__
=
other_kwargs
...
...
@@ -242,32 +226,30 @@ class MixedLayerV2(Layer):
layer_attr
=
None
):
self
.
__method_name__
=
'mixed_layer'
self
.
finalized
=
False
self
.
__parent_layers__
=
dict
()
other_kwargs
=
dict
()
self
.
input_name
=
'input'
self
.
__parent_layers__
[
self
.
input_name
]
=
[]
self
.
__inputs__
=
[]
if
input
is
not
None
:
self
.
__
parent_layers__
[
self
.
input_name
]
=
input
self
.
__
inputs__
=
input
self
.
name
=
name
other_kwargs
=
dict
()
other_kwargs
[
'name'
]
=
name
other_kwargs
[
'size'
]
=
size
other_kwargs
[
'act'
]
=
act
other_kwargs
[
'bias_attr'
]
=
bias_attr
other_kwargs
[
'layer_attr'
]
=
layer_attr
Layer
.
__init__
(
self
,
name
,
self
.
__parent_layers__
)
parent_layers
=
{
"input"
:
self
.
__inputs__
}
super
(
MixedLayerV2
,
self
).
__init__
(
name
,
parent_layers
)
self
.
__other_kwargs__
=
other_kwargs
def
__iadd__
(
self
,
other
):
if
not
self
.
finalized
:
self
.
__
parent_layers__
[
self
.
input_name
]
.
append
(
other
)
self
.
__
inputs__
.
append
(
other
)
return
self
else
:
raise
MixedLayerTypeV2
.
AddToSealedMixedLayerExceptionV2
()
def
__enter__
(
self
):
assert
len
(
self
.
__
parent_layers__
[
self
.
input_name
]
)
==
0
assert
len
(
self
.
__
inputs__
)
==
0
return
self
def
__exit__
(
self
,
*
args
,
**
kwargs
):
...
...
@@ -279,7 +261,7 @@ class MixedLayerV2(Layer):
args
[
each
]
=
kwargs
[
each
]
for
each
in
self
.
__other_kwargs__
:
args
[
each
]
=
self
.
__other_kwargs__
[
each
]
return
getattr
(
conf_helps
,
self
.
__method_name__
)(
name
=
self
.
name
,
**
args
)
return
getattr
(
conf_helps
,
self
.
__method_name__
)(
**
args
)
@
wrap_name_default
(
"mixed"
)
...
...
@@ -331,18 +313,7 @@ huber_cost = __convert_to_v2__(
'huber_cost'
,
name_prefix
=
'huber_cost'
,
parent_names
=
[
'input'
,
'label'
])
# convert projection
projection_list
=
[
# [V1_method_name], all the parent_names is `input`
'full_matrix_projection'
,
'trans_full_matrix_projection'
,
'table_projection'
,
'scaling_projection'
,
'dotmul_projection'
,
'context_projection'
,
'conv_projection'
,
'identity_projection'
,
]
for
prj
in
projection_list
:
for
prj
in
__projection_names__
:
globals
()[
prj
]
=
__convert_to_v2__
(
prj
,
parent_names
=
[
'input'
])
# convert operator
...
...
python/paddle/v2/tests/test_layer.py
浏览文件 @
b59f35ef
...
...
@@ -32,7 +32,7 @@ inference = layer.fc(input=hidden, size=10, act=activation.Softmax())
class
CostLayerTest
(
unittest
.
TestCase
):
def
test_cost_layer
(
self
):
def
not_
test_cost_layer
(
self
):
cost1
=
layer
.
classification_cost
(
input
=
inference
,
label
=
label
)
cost2
=
layer
.
classification_cost
(
input
=
inference
,
label
=
label
,
weight
=
weight
)
...
...
@@ -60,12 +60,8 @@ class CostLayerTest(unittest.TestCase):
input
=
layer
.
data
(
name
=
'data'
,
type
=
data_type
.
dense_vector
(
784
))
word
=
layer
.
data
(
name
=
'word'
,
type
=
data_type
.
integer_value_sequence
(
10000
))
fc0
=
layer
.
fc
(
input
=
input
,
size
=
100
,
act
=
conf_helps
.
SigmoidActivation
())
fc1
=
layer
.
fc
(
input
=
input
,
size
=
200
,
act
=
conf_helps
.
SigmoidActivation
())
fc0
=
layer
.
fc
(
input
=
input
,
size
=
100
,
act
=
activation
.
Sigmoid
())
fc1
=
layer
.
fc
(
input
=
input
,
size
=
200
,
act
=
activation
.
Sigmoid
())
mixed0
=
layer
.
mixed
(
size
=
256
,
input
=
[
...
...
@@ -121,8 +117,8 @@ class CostLayerTest(unittest.TestCase):
def
test_operator
(
self
):
ipt0
=
layer
.
data
(
name
=
'data'
,
type
=
data_type
.
dense_vector
(
784
))
ipt1
=
layer
.
data
(
name
=
'word'
,
type
=
data_type
.
dense_vector
(
128
))
fc0
=
layer
.
fc
(
input
=
ipt0
,
size
=
100
,
act
=
conf_helps
.
SigmoidActivation
())
fc1
=
layer
.
fc
(
input
=
ipt0
,
size
=
100
,
act
=
conf_helps
.
SigmoidActivation
())
fc0
=
layer
.
fc
(
input
=
ipt0
,
size
=
100
,
act
=
activation
.
Sigmoid
())
fc1
=
layer
.
fc
(
input
=
ipt0
,
size
=
100
,
act
=
activation
.
Sigmoid
())
dotmul_op
=
layer
.
dotmul_operator
(
a
=
fc0
,
b
=
fc1
)
dotmul0
=
layer
.
mixed
(
input
=
dotmul_op
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录