PaddlePaddle / Paddle — Commit 01d568e5

Authored Jan 18, 2018 by wanghaoshuang

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into ctc_evaluator_py

Parents: 4673a4a9, 2360dd20
Showing 3 changed files with 93 additions and 60 deletions (+93 −60):

- paddle/operators/math/sequence_padding.cc (+20, −18)
- paddle/operators/math/sequence_padding.cu (+12, −10)
- python/paddle/v2/fluid/layers/nn.py (+61, −32)

The two C++ files migrate the sequence-padding functors' index bookkeeping from size_t to int64_t, evidently to match the signed dimensions returned by seq.dims(); the Python file adds a new warpctc layer wrapping the warpctc operator.
paddle/operators/math/sequence_padding.cc
```diff
@@ -32,7 +32,8 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
@@ -41,32 +42,32 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequence_length, num_sequences, sequence_width].");
 
-    const size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    const int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be the "
                       "maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be the "
                       "number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
 
     const T* seq_data = seq.data<T>();
     T* padding_data = padding.data<T>();
-    for (size_t i = 0; i < max_sequence_length; ++i) {
-      for (size_t j = 0; j < num_sequences; ++j) {
-        size_t start_pos = abs_offset_lod[level][j];
-        size_t sequence_length = abs_offset_lod[level][j + 1] - start_pos;
+    for (int64_t i = 0; i < max_sequence_length; ++i) {
+      for (int64_t j = 0; j < num_sequences; ++j) {
+        int64_t start_pos = abs_offset_lod[level][j];
+        int64_t sequence_length = abs_offset_lod[level][j + 1] - start_pos;
         if (i < sequence_length) {
           // i > 0 => sequence_length > 0
           T scale =
               norm_by_times ? (1.0f / static_cast<T>(sequence_length)) : 1.0f;
-          for (size_t k = 0; k < sequence_width; ++k) {
+          for (int64_t k = 0; k < sequence_width; ++k) {
             padding_data[(i * num_sequences + j) * sequence_width + k] =
                 seq_data[(start_pos + i) * sequence_width + k] * scale;
           }
@@ -93,7 +94,8 @@ class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
@@ -102,31 +104,31 @@ class UnpaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequnece_length, num_sequences, sequence_width].");
 
-    const size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    const int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be "
                       "the maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be "
                       "the number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
 
     const T* padding_data = padding.data<T>();
     T* seq_data = seq.data<T>();
-    for (size_t i = 0; i < num_sequences; ++i) {
-      size_t start_pos = abs_offset_lod[level][i];
-      size_t sequence_length = abs_offset_lod[level][i + 1] - start_pos;
-      for (size_t j = 0; j < sequence_length; ++j) {
+    for (int64_t i = 0; i < num_sequences; ++i) {
+      int64_t start_pos = abs_offset_lod[level][i];
+      int64_t sequence_length = abs_offset_lod[level][i + 1] - start_pos;
+      for (int64_t j = 0; j < sequence_length; ++j) {
         // sequence_width > j > 0
         T scale =
             norm_by_times ? (1.0f / static_cast<T>(sequence_length)) : 1.0f;
-        for (size_t k = 0; k < sequence_width; ++k) {
+        for (int64_t k = 0; k < sequence_width; ++k) {
           seq_data[(start_pos + j) * sequence_width + k] =
               padding_data[(j * num_sequences + i) * sequence_width + k] *
               scale;
```
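The index arithmetic above packs a flat LoD batch into a [max_sequence_length, num_sequences, sequence_width] tensor: padding_data[(i * num_sequences + j) * sequence_width + k] is element (i, j, k) of that row-major 3-D layout. The following is a minimal NumPy sketch of what the two CPU functors compute; it is not part of the commit, and the names pad_lod_tensor/unpad_lod_tensor are hypothetical.

```python
import numpy as np


def pad_lod_tensor(seq, offsets, norm_by_times=False):
    # Pack a flat batch `seq` of shape [sum of lengths, width] into a
    # [max_sequence_length, num_sequences, sequence_width] tensor,
    # mirroring PaddingLoDTensorFunctor. `offsets` are absolute LoD
    # offsets, e.g. [0, 4, 9] for two sequences of lengths 4 and 5.
    num_sequences = len(offsets) - 1
    lengths = [offsets[j + 1] - offsets[j] for j in range(num_sequences)]
    width = seq.shape[1]
    padding = np.zeros((max(lengths), num_sequences, width), dtype=seq.dtype)
    for j in range(num_sequences):
        scale = 1.0 / lengths[j] if norm_by_times else 1.0
        padding[:lengths[j], j, :] = seq[offsets[j]:offsets[j + 1], :] * scale
    return padding


def unpad_lod_tensor(padding, offsets, norm_by_times=False):
    # The inverse scatter, mirroring UnpaddingLoDTensorFunctor: copy each
    # sequence's rows back into the flat [sum of lengths, width] layout.
    num_sequences = len(offsets) - 1
    width = padding.shape[2]
    seq = np.zeros((offsets[-1], width), dtype=padding.dtype)
    for i in range(num_sequences):
        length = offsets[i + 1] - offsets[i]
        scale = 1.0 / length if norm_by_times else 1.0
        seq[offsets[i]:offsets[i + 1], :] = padding[:length, i, :] * scale
    return seq


# Two sequences of lengths 4 and 5, width 3:
seq = np.arange(27, dtype=np.float32).reshape(9, 3)
padded = pad_lod_tensor(seq, [0, 4, 9])
assert padded.shape == (5, 2, 3)
assert np.array_equal(unpad_lod_tensor(padded, [0, 4, 9]), seq)
```

Round-tripping pad then unpad (with norm_by_times=False) reproduces the input exactly, which is the invariant the CPU and CUDA implementations share.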
paddle/operators/math/sequence_padding.cu
```diff
@@ -71,7 +71,8 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
@@ -80,17 +81,17 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequence_length, num_sequences, sequence_width].");
 
-    size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be the "
                       "maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be the "
                       "number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
@@ -101,7 +102,7 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
       return;
     }
 
-    const size_t kBlockSize = 512;
+    const int64_t kBlockSize = 512;
 
     /* At least use 32 threads to copy sequence_width elements,
      * and at least 8 elements for each thread.
@@ -143,7 +144,8 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);
 
     auto seq_dims = seq.dims();
-    PADDLE_ENFORCE_EQ(seq_dims[0], abs_offset_lod[level].back(),
+    PADDLE_ENFORCE_EQ(seq_dims[0],
+                      static_cast<int64_t>(abs_offset_lod[level].back()),
                       "The first dimension of LoDTensor seq should be "
                       "equal to the sum of all sequences's length.");
@@ -152,17 +154,17 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
                       "The input padding should be a 3-D Tensor of shape "
                       "[max_sequnece_length, num_sequences, sequence_width].");
 
-    size_t max_sequence_length = MaximumSequenceLength(lod, level);
+    int64_t max_sequence_length = MaximumSequenceLength(lod, level);
     PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length,
                       "The first dimension of Tensor padding should be "
                       "the maximum length of all sequences in LoDTensor seq.");
 
-    const size_t num_sequences = abs_offset_lod[level].size() - 1;
+    const int64_t num_sequences = abs_offset_lod[level].size() - 1;
     PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences,
                       "The second dimension of Tensor padding should be "
                       "the number of sequences in LoDTensor seq.");
 
-    const size_t sequence_width = seq.numel() / seq_dims[0];
+    const int64_t sequence_width = seq.numel() / seq_dims[0];
     PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width,
                       "The third dimension of Tensor padding should be the "
                       "width of sequence in LoDTensor seq.");
@@ -173,7 +175,7 @@ class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
       return;
    }
 
-    const size_t kBlockSize = 512;
+    const int64_t kBlockSize = 512;
 
     /* At least use 32 threads to copy sequence_width elements,
      * and at least 8 elements for each thread.
```
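The launch-configuration code that consumes kBlockSize is collapsed in this view; only the comment stating the heuristic ("at least 32 threads to copy sequence_width elements, and at least 8 elements for each thread") survives. The sketch below is an assumption, not the commit's actual code: pick_block_dims and its warp-doubling strategy are hypothetical, shown only to make the stated constraint concrete.

```python
def pick_block_dims(sequence_width, k_block_size=512):
    # Start from one warp (32 threads) on the width dimension and double
    # it until each thread handles at most 8 of the row's elements, never
    # exceeding the 512-thread block. The remaining threads of the block
    # go to the y-dimension (parallelism across time steps / sequences).
    block_dim_x = 32
    while block_dim_x * 8 < sequence_width and block_dim_x < k_block_size:
        block_dim_x *= 2
    block_dim_y = k_block_size // block_dim_x
    return block_dim_x, block_dim_y


print(pick_block_dims(48))    # (32, 16): one warp suffices, <= 8 elems each
print(pick_block_dims(1000))  # (128, 4): wider rows get more x-threads
```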
python/paddle/v2/fluid/layers/nn.py
```diff
@@ -22,38 +22,14 @@ from ..param_attr import ParamAttr
 from tensor import concat
 
 __all__ = [
-    'fc',
-    'embedding',
-    'dynamic_lstm',
-    'gru_unit',
-    'linear_chain_crf',
-    'crf_decoding',
-    'cos_sim',
-    'cross_entropy',
-    'square_error_cost',
-    'accuracy',
-    'chunk_eval',
-    'sequence_conv',
-    'conv2d',
-    'sequence_pool',
-    'pool2d',
-    'batch_norm',
-    'beam_search_decode',
-    'conv2d_transpose',
-    'sequence_expand',
-    'lstm_unit',
-    'reduce_sum',
-    'reduce_mean',
-    'reduce_max',
-    'reduce_min',
-    'sequence_first_step',
-    'sequence_last_step',
-    'dropout',
-    'split',
-    'ctc_greedy_decoder',
-    'edit_distance_error',
-    'l2_normalize',
-    'matmul',
+    'fc', 'embedding', 'dynamic_lstm', 'gru_unit', 'linear_chain_crf',
+    'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost',
+    'accuracy', 'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool',
+    'pool2d', 'batch_norm', 'beam_search_decode', 'conv2d_transpose',
+    'sequence_expand', 'lstm_unit', 'reduce_sum', 'reduce_mean',
+    'reduce_max', 'reduce_min', 'sequence_first_step', 'sequence_last_step',
+    'dropout', 'split', 'ctc_greedy_decoder', 'edit_distance_error',
+    'l2_normalize', 'matmul', 'warpctc'
 ]
```
```diff
@@ -1903,3 +1879,56 @@ def ctc_greedy_decoder(input, blank, name=None):
         attrs={"merge_repeated": True,
                "blank": blank})
     return ctc_out
+
+
+def warpctc(input, label, blank=0, norm_by_times=False, **kwargs):
+    """
+    An operator integrating the open-source Warp-CTC library
+    (https://github.com/baidu-research/warp-ctc) to compute Connectionist
+    Temporal Classification (CTC) loss. It can be aliased as softmax with
+    CTC, since a native softmax activation is integrated into the Warp-CTC
+    library to normalize values for each row of the input tensor.
+
+    Args:
+        input(Variable): (LoDTensor, default: LoDTensor<float>), the unscaled
+            probabilities of variable-length sequences, which is a 2-D Tensor
+            with LoD information. Its shape is [Lp, num_classes + 1], where Lp
+            is the sum of all input sequences' lengths and num_classes is the
+            true number of classes (not including the blank label).
+        label(Variable): (LoDTensor, default: LoDTensor<int>), the ground
+            truth of variable-length sequences, which is a 2-D Tensor with
+            LoD information. It is of shape [Lg, 1], where Lg is the sum of
+            all labels' lengths.
+        blank: (int, default: 0), the blank label index of Connectionist
+            Temporal Classification (CTC) loss, which is in the half-open
+            interval [0, num_classes + 1).
+        norm_by_times: (bool, default: False), whether to normalize the
+            gradients by the number of time steps, which is also the
+            sequence's length. There is no need to normalize the gradients
+            if the warpctc layer is followed by a mean_op.
+
+    Returns:
+        Variable: The Connectionist Temporal Classification (CTC) loss,
+        which is a 2-D Tensor of shape [batch_size, 1].
+
+    Examples:
+        .. code-block:: python
+
+            y = layers.data(name='y', shape=[11, 8], dtype='float32', lod_level=1)
+            y_predict = layers.data(name='y_predict', shape=[11, 1], dtype='float32')
+            cost = layers.warpctc(input=y_predict, label=y)
+    """
+    helper = LayerHelper('warpctc', **kwargs)
+    loss_out = helper.create_tmp_variable(dtype=input.dtype)
+    grad_out = helper.create_tmp_variable(dtype=input.dtype)
+    helper.append_op(
+        type='warpctc',
+        inputs={'Logits': [input],
+                'Label': [label]},
+        outputs={'WarpCTCGrad': [grad_out],
+                 'Loss': [loss_out]},
+        attrs={'blank': blank,
+               'norm_by_times': norm_by_times})
+    return loss_out
```
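Per the docstring, following warpctc with a mean op makes norm_by_times unnecessary, since averaging already divides the summed loss. The following end-to-end sketch of that wiring is an assumption about the fluid API of this era, not part of the commit; in particular, layers.mean is assumed to exist and is not shown in this diff.

```python
# Hedged usage sketch: CTC loss over variable-length sequences in fluid.
import paddle.v2.fluid.layers as layers

# 8 real classes plus one blank => num_classes + 1 = 9 logits per time step.
# Both inputs carry LoD information describing the sequence boundaries.
logits = layers.data(name='logits', shape=[9], dtype='float32', lod_level=1)
label = layers.data(name='label', shape=[1], dtype='int32', lod_level=1)

cost = layers.warpctc(input=logits, label=label, blank=0,
                      norm_by_times=False)
# Averaging the per-sequence losses plays the role of the mean_op mentioned
# in the docstring, so norm_by_times can stay False.
avg_cost = layers.mean(x=cost)
```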