BaiXuePrincess / Paddle
Forked from PaddlePaddle / Paddle (in sync with the fork source)
Commit 91aac572
Unverified commit 91aac572
Authored Feb 12, 2018 by fengjiayi, committed via GitHub on Feb 12, 2018

Merge pull request #8405 from JiayiFeng/dev_remove_kwargs

remove `**kwargs` in layer interfaces

Parents: 057efd17, 8c302d48
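
Every hunk below applies the same mechanical pattern: the catch-all `**kwargs` is dropped from the layer's signature, and `LayerHelper` is constructed with `**locals()` so it still receives the layer's arguments by name. A toy sketch of why that substitution works (the stand-in helper is illustrative, not Paddle's real `LayerHelper`):

class ToyLayerHelper(object):
    # Toy stand-in: the real LayerHelper stores these kwargs and reads
    # entries such as 'name' and 'input' from them on demand.
    def __init__(self, layer_type, **kwargs):
        self.layer_type = layer_type
        self.kwargs = kwargs

# Before: callers could pass arbitrary, unchecked keyword arguments.
def cos_sim_old(X, Y, **kwargs):
    return ToyLayerHelper('cos_sim', **kwargs)

# After: the signature is explicit, and **locals() forwards every local
# name bound so far (here X and Y) to the helper under its own name.
def cos_sim_new(X, Y):
    return ToyLayerHelper('cos_sim', **locals())

print(cos_sim_new(1, 2).kwargs)  # {'X': 1, 'Y': 2}

The practical gain: a misspelled or unsupported keyword now raises a `TypeError` at the call site instead of being silently forwarded to the helper.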
Showing 1 changed file with 27 additions and 27 deletions.

python/paddle/v2/fluid/layers/nn.py (+27, -27)
@@ -831,12 +831,12 @@ def crf_decoding(input, param_attr, label=None):
     return viterbi_path


-def cos_sim(X, Y, **kwargs):
+def cos_sim(X, Y):
     """
     This function performs the cosine similarity between two tensors
     X and Y and returns that as the output.
     """
-    helper = LayerHelper('cos_sim', **kwargs)
+    helper = LayerHelper('cos_sim', **locals())
     out = helper.create_tmp_variable(dtype=X.dtype)
     xnorm = helper.create_tmp_variable(dtype=X.dtype)
     ynorm = helper.create_tmp_variable(dtype=X.dtype)
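
For reference, the docstring's quantity is plain cosine similarity. A minimal NumPy sketch, assuming the last axis holds the feature vector (reference math only, not the Fluid operator):

import numpy as np

def cos_sim_ref(X, Y):
    # cos(X, Y) = X.Y / (|X| * |Y|), computed row-wise,
    # mirroring the out / xnorm / ynorm temporaries above.
    xnorm = np.linalg.norm(X, axis=-1)
    ynorm = np.linalg.norm(Y, axis=-1)
    return (X * Y).sum(axis=-1) / (xnorm * ynorm)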
@@ -850,7 +850,7 @@ def cos_sim(X, Y, **kwargs):
     return out


-def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs):
+def dropout(x, dropout_prob, is_test=False, seed=None):
     """
     Computes dropout.
@@ -879,7 +879,7 @@ def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs):
         droped = fluid.layers.dropout(input=x, dropout_rate=0.5)
     """
-    helper = LayerHelper('dropout', **kwargs)
+    helper = LayerHelper('dropout', **locals())
     out = helper.create_tmp_variable(dtype=x.dtype)
     mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True)
     helper.append_op(
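
The body builds the op from an output and a random mask; the computation is the usual mask-and-multiply. A hedged NumPy sketch (the train/test scaling convention is not visible in this hunk, so none is applied):

import numpy as np

def dropout_ref(x, dropout_prob, is_test=False, seed=None):
    # At test time the input passes through unchanged.
    if is_test:
        return x
    # Otherwise zero each element independently with probability dropout_prob.
    rng = np.random.default_rng(seed)
    mask = (rng.random(x.shape) >= dropout_prob).astype(x.dtype)
    return x * mask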
@@ -896,7 +896,7 @@ def dropout(x, dropout_prob, is_test=False, seed=None, **kwargs):
     return out


-def cross_entropy(input, label, **kwargs):
+def cross_entropy(input, label, soft_label=False):
     """
     **Cross Entropy Layer**
@@ -905,15 +905,15 @@ def cross_entropy(input, label, **kwargs):
     computation.

     1) One-hot cross-entropy:
-        `soft_label = False`, `Label[i, 0]` indicates the class index for sample i:
+        `soft_label = False`, `Label[i, 0]` indicates the class index for sample i:

         .. math::

             Y[i] = -\log(X[i, Label[i]])

     2) Soft-label cross-entropy:
-        `soft_label = True`, `Label[i, j]` indicates the soft label of class j
-        for sample i:
+        `soft_label = True`, `Label[i, j]` indicates the soft label of class j
+        for sample i:

         .. math::
@@ -923,8 +923,8 @@ def cross_entropy(input, label, **kwargs):
     equals one.

     3) One-hot cross-entropy with vecterized `label`:
-        As a special case of 2), when each row of 'label' has only one
-        non-zero element which is equal to 1, soft-label cross-entropy degenerates
+        As a special case of 2), when each row of 'label' has only one
+        non-zero element which is equal to 1, soft-label cross-entropy degenerates
         to a one-hot cross-entropy with one-hot label representation.

     Args:
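
A small NumPy check of the two formulas, including the degenerate case in 3) (reference arithmetic with made-up values):

import numpy as np

X = np.array([[0.7, 0.2, 0.1],
              [0.1, 0.8, 0.1]])  # per-sample class probabilities

# 1) One-hot mode: Y[i] = -log(X[i, Label[i]])
label = np.array([0, 1])
one_hot_loss = -np.log(X[np.arange(len(X)), label])

# 2) Soft-label mode: Y[i] = -sum_j Label[i, j] * log(X[i, j])
soft = np.array([[1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0]])
soft_loss = -(soft * np.log(X)).sum(axis=1)

# 3) With a one-hot soft label, the two modes coincide.
assert np.allclose(one_hot_loss, soft_loss)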
@@ -938,7 +938,7 @@ def cross_entropy(input, label, **kwargs):
                                 tensor<int64> with shape [N x 1]. When
                                 `soft_label` is set to `True`, `label` is a
                                 tensor<float/double> with shape [N x D].
-        soft_label (bool, via `**kwargs`): a flag indicating whether to
+        soft_label (bool): a flag indicating whether to
                                            interpretate the given labels as soft
                                            labels, default `False`.
@@ -958,18 +958,18 @@ def cross_entropy(input, label, **kwargs):
           predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
           cost = fluid.layers.cross_entropy(input=predict, label=label)
     """
-    helper = LayerHelper('cross_entropy', **kwargs)
+    helper = LayerHelper('cross_entropy', **locals())
     out = helper.create_tmp_variable(dtype=input.dtype)
     helper.append_op(
         type='cross_entropy',
         inputs={'X': [input],
                 'Label': [label]},
         outputs={'Y': [out]},
-        attrs=kwargs)
+        attrs={"soft_label": soft_label})
     return out


-def square_error_cost(input, label, **kwargs):
+def square_error_cost(input, label):
     """
     **Square error cost layer**
@@ -1004,7 +1004,7 @@ def square_error_cost(input, label, **kwargs):
         cost = layers.square_error_cost(input=y_predict, label=y)
     """
-    helper = LayerHelper('square_error_cost', **kwargs)
+    helper = LayerHelper('square_error_cost', **locals())
     minus_out = helper.create_tmp_variable(dtype=input.dtype)
     helper.append_op(
         type='elementwise_sub',
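
As the op name suggests, the layer is an elementwise subtraction followed by a square; the equivalent NumPy sketch:

import numpy as np

def square_error_cost_ref(input, label):
    # elementwise_sub into minus_out, then an elementwise square
    return (np.asarray(input) - np.asarray(label)) ** 2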
@@ -1019,12 +1019,12 @@ def square_error_cost(input, label, **kwargs):
     return square_out


-def accuracy(input, label, k=1, correct=None, total=None, **kwargs):
+def accuracy(input, label, k=1, correct=None, total=None):
     """
     This function computes the accuracy using the input and label.
     The output is the top_k inputs and their indices.
     """
-    helper = LayerHelper("accuracy", **kwargs)
+    helper = LayerHelper("accuracy", **locals())
     topk_out = helper.create_tmp_variable(dtype=input.dtype)
     topk_indices = helper.create_tmp_variable(dtype="int64")
     helper.append_op(
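
The temporaries hint at the implementation: top-k selection, then comparison against the label. A NumPy sketch of the metric, assuming `input` is an [N, D] score matrix and `label` holds N class indices:

import numpy as np

def accuracy_ref(input, label, k=1):
    # A sample counts as correct if its label appears among the
    # k highest-scoring classes (cf. topk_out / topk_indices above).
    topk_indices = np.argsort(input, axis=1)[:, -k:]
    correct = (topk_indices == np.asarray(label).reshape(-1, 1)).any(axis=1)
    return correct.mean()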
@@ -1057,13 +1057,12 @@ def chunk_eval(input,
                label,
                chunk_scheme,
                num_chunk_types,
-               excluded_chunk_types=None,
-               **kwargs):
+               excluded_chunk_types=None):
     """
     This function computes and outputs the precision, recall and
     F1-score of chunk detection.
     """
-    helper = LayerHelper("chunk_eval", **kwargs)
+    helper = LayerHelper("chunk_eval", **locals())

     # prepare output
     precision = helper.create_tmp_variable(dtype="float32")
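
The F1 score that chunk_eval reports is, by the standard definition, the harmonic mean of precision and recall (assumed from the usual formula, not read off this hunk):

def f1_score(precision, recall):
    # F1 = 2PR / (P + R), with the 0/0 case defined as 0.
    if precision + recall == 0:
        return 0.0
    return 2.0 * precision * recall / (precision + recall)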
@@ -1295,7 +1294,7 @@ def conv2d(input,
     return helper.append_activation(pre_act)


-def sequence_pool(input, pool_type, **kwargs):
+def sequence_pool(input, pool_type):
     """
     This function add the operator for sequence pooling.
     It pools features of all time-steps of each instance, and is applied
@@ -1345,7 +1344,7 @@ def sequence_pool(input, pool_type, **kwargs):
         sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt')
         max_x = fluid.layers.sequence_pool(input=x, pool_type='max')
     """
-    helper = LayerHelper('sequence_pool', input=input, **kwargs)
+    helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_tmp_variable(dtype)
     max_index = helper.create_tmp_variable(dtype)
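
For intuition, a hedged NumPy sketch of sequence pooling over a ragged batch, covering the pool types from the docstring plus the 'first'/'last' modes used by the two wrapper layers in the following hunks (each sequence is a [len_i, dim] array; the real layer operates on LoD tensors):

import numpy as np

def sequence_pool_ref(seqs, pool_type):
    # Reduce each variable-length sequence to a single feature vector.
    ops = {
        'average': lambda s: s.mean(axis=0),
        'sum':     lambda s: s.sum(axis=0),
        'sqrt':    lambda s: s.sum(axis=0) / np.sqrt(len(s)),
        'max':     lambda s: s.max(axis=0),
        'first':   lambda s: s[0],
        'last':    lambda s: s[-1],
    }
    return np.stack([ops[pool_type](np.asarray(s)) for s in seqs])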
@@ -1365,7 +1364,7 @@ def sequence_pool(input, pool_type, **kwargs):
     return pool_out


-def sequence_first_step(input, **kwargs):
+def sequence_first_step(input):
     """
     This funciton get the first step of sequence.
@@ -1398,7 +1397,7 @@ def sequence_first_step(input, **kwargs):
     return sequence_pool(input=input, pool_type="first")


-def sequence_last_step(input, **kwargs):
+def sequence_last_step(input):
     """
     This funciton get the last step of sequence.
@@ -2338,7 +2337,8 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
         normed = fluid.layers.l2_normalize(x=data, axis=1)
     """
-    if len(x.shape) == 1: axis = 0
+    if len(x.shape) == 1:
+        axis = 0

     helper = LayerHelper("l2_normalize", **locals())
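
A NumPy sketch of what l2_normalize computes, including the rank-1 special case this hunk reformats; treating `epsilon` as a floor on the squared norm is an assumption based on the usual formulation:

import numpy as np

def l2_normalize_ref(x, axis, epsilon=1e-12):
    # For rank-1 input, normalize along the only axis.
    if len(x.shape) == 1:
        axis = 0
    # Divide by the L2 norm along `axis`, flooring the squared norm
    # at epsilon to avoid division by zero.
    norm = np.sqrt(np.maximum((x * x).sum(axis=axis, keepdims=True), epsilon))
    return x / norm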
@@ -2656,7 +2656,7 @@ def ctc_greedy_decoder(input, blank, name=None):
     return ctc_out


-def warpctc(input, label, blank=0, norm_by_times=False, **kwargs):
+def warpctc(input, label, blank=0, norm_by_times=False):
     """
     An operator integrating the open source Warp-CTC library
     (https://github.com/baidu-research/warp-ctc)
@@ -2697,7 +2697,7 @@ def warpctc(input, label, blank=0, norm_by_times=False, **kwargs):
         cost = layers.warpctc(input=y_predict, label=y)
     """
-    helper = LayerHelper('warpctc', **kwargs)
+    helper = LayerHelper('warpctc', **locals())
     loss_out = helper.create_tmp_variable(dtype=input.dtype)
     grad_out = helper.create_tmp_variable(dtype=input.dtype)
     helper.append_op(