Commit b1af5e43
Authored Jan 11, 2018 by wanghaoshuang
1. Fix warpctc grad op
2. Add check grad test
Parent: df927768
Showing 7 changed files with 196 additions and 9 deletions (+196, -9):
paddle/operators/CMakeLists.txt                   +1  -1
paddle/operators/math/CMakeLists.txt              +2  -0
paddle/operators/math/sequence_scale.cc           +46 -0
paddle/operators/math/sequence_scale.cu           +61 -0
paddle/operators/math/sequence_scale.h            +53 -0
paddle/operators/warpctc_op.h                     +28 -1
python/paddle/v2/fluid/tests/test_warpctc_op.py   +5  -7
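In outline (a hedged reading of the diff below, not the author's prose): warp-ctc returns the gradient of each sequence's loss with respect to its logits, and the backward pass must additionally multiply each sequence's gradient block by the incoming gradient dL/dloss_i; the new ScaleLoDTensorFunctor performs that per-sequence multiply. A toy numeric instance of the chain rule, with made-up values:

#include <iostream>

int main() {
  // Chain rule per sequence: dL/dlogit = (dloss_i/dlogit) * (dL/dloss_i).
  const float warpctc_grad = 0.25f;  // d(loss_i)/d(logit), from warp-ctc
  const float loss_grad = 2.0f;      // d(L)/d(loss_i), from upstream
  std::cout << warpctc_grad * loss_grad << '\n';  // prints 0.5
}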
paddle/operators/CMakeLists.txt
@@ -151,7 +151,7 @@ op_library(lstm_op DEPS sequence2batch lstm_compute)
 op_library(conv_transpose_op DEPS vol2col)
 op_library(gru_op DEPS sequence2batch gru_compute)
 op_library(recurrent_op DEPS executor)
-op_library(warpctc_op DEPS dynload_warpctc sequence_padding math_function)
+op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale math_function)
 op_library(cos_sim_op DEPS cos_sim_functor)
 op_library(parallel_do_op DEPS executor)
 # FIXME(typhoonzero): save/load depends lodtensor serialization functions
paddle/operators/math/CMakeLists.txt
@@ -13,6 +13,7 @@ if(WITH_GPU)
 nv_library(context_project SRCS context_project.cc context_project.cu DEPS device_context math_function)
 nv_library(sequence2batch SRCS sequence2batch.cc sequence2batch.cu DEPS device_context tensor)
 nv_library(sequence_padding SRCS sequence_padding.cc sequence_padding.cu DEPS lod_tensor device_context)
+nv_library(sequence_scale SRCS sequence_scale.cc sequence_scale.cu DEPS lod_tensor device_context)
 nv_library(lstm_compute SRCS lstm_compute.cc lstm_compute.cu DEPS device_context activation_functions)
 nv_library(maxouting SRCS maxouting.cc maxouting.cu DEPS device_context)
 nv_library(unpooling SRCS unpooling.cc unpooling.cu DEPS device_context)
@@ -29,6 +30,7 @@ else()
 cc_library(context_project SRCS context_project.cc DEPS device_context math_function)
 cc_library(sequence2batch SRCS sequence2batch.cc DEPS device_context tensor)
 cc_library(sequence_padding SRCS sequence_padding.cc DEPS lod_tensor device_context)
+cc_library(sequence_scale SRCS sequence_scale.cc DEPS lod_tensor device_context)
 cc_library(lstm_compute SRCS lstm_compute.cc DEPS device_context activation_functions)
 cc_library(maxouting SRCS maxouting.cc DEPS device_context)
 cc_library(unpooling SRCS unpooling.cc DEPS device_context)
paddle/operators/math/sequence_scale.cc
new file mode 100644

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/math/sequence_scale.h"

namespace paddle {
namespace operators {
namespace math {

template <typename T>
class ScaleLoDTensorFunctor<platform::CPUDeviceContext, T> {
 public:
  void operator()(const platform::CPUDeviceContext& context,
                  framework::LoDTensor& seq, const T* scales,
                  const size_t num_seq) {
    const size_t level = 0;
    auto lod = seq.lod();
    size_t seq_width = seq.dims()[1];
    framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);

    T* seq_data = seq.mutable_data<T>(context.GetPlace());
    for (size_t i = 0; i < num_seq; ++i) {
      for (size_t j = lod[level][i] * seq_width;
           j < lod[level][i + 1] * seq_width; ++j) {
        seq_data[j] *= scales[i];
      }
    }
  }
};

template class ScaleLoDTensorFunctor<platform::CPUDeviceContext, float>;

}  // namespace math
}  // namespace operators
}  // namespace paddle
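To make the indexing concrete, here is a minimal stand-alone sketch of the same per-sequence scaling the CPU functor performs. All data, lengths, and scale values are invented for illustration; `lod` plays the role of the absolute offsets in seq.lod()[0].

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  // Two sequences of lengths 3 and 1, width 2 -> 8 elements in total.
  const std::size_t seq_width = 2;
  std::vector<std::size_t> lod = {0, 3, 4};  // absolute offsets
  std::vector<float> seq_data(4 * seq_width, 1.0f);
  std::vector<float> scales = {0.5f, 2.0f};  // one scale per sequence

  // Same loop structure as ScaleLoDTensorFunctor<CPUDeviceContext, T> above.
  for (std::size_t i = 0; i + 1 < lod.size(); ++i) {
    for (std::size_t j = lod[i] * seq_width; j < lod[i + 1] * seq_width; ++j) {
      seq_data[j] *= scales[i];
    }
  }

  for (float v : seq_data) std::cout << v << ' ';  // 0.5 x6, then 2 x2
  std::cout << '\n';
}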
paddle/operators/math/sequence_scale.cu
new file mode 100644

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/math/sequence_scale.h"

namespace paddle {
namespace operators {
namespace math {

template <typename T>
__global__ void SequenceScaleKernel(T* seq, size_t* lod, const T* scales,
                                    const size_t num_seq,
                                    const size_t seq_width) {
  size_t idx = blockIdx.x * blockDim.y + threadIdx.y;

  if (idx < lod[num_seq]) {
    size_t i = 0;
    for (i = 0; i < num_seq; ++i) {
      if (idx < lod[i + 1] * seq_width) {
        break;
      }
    }
    seq[i] *= scales[i];
  }
}

template <typename T>
class ScaleLoDTensorFunctor<platform::CUDADeviceContext, T> {
 public:
  void operator()(const platform::CUDADeviceContext& context,
                  framework::LoDTensor& seq, const T* scales,
                  const size_t num_seq) {
    auto lod = seq.lod();
    const size_t seq_width = seq.dims()[1];
    const size_t level = 0;
    framework::LoD abs_offset_lod = framework::ToAbsOffset(lod);

    T* seq_data = seq.mutable_data<T>(context.GetPlace());

    int threads = 1024;
    int grid = (seq.numel() * seq_width + threads - 1) / threads;
    SequenceScaleKernel<T><<<grid, threads, 0, context.stream()>>>(
        seq_data, abs_offset_lod[level].data(), scales, num_seq, seq_width);
  }
};

template class ScaleLoDTensorFunctor<platform::CUDADeviceContext, float>;

}  // namespace math
}  // namespace operators
}  // namespace paddle
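The kernel assigns one thread per flattened element (idx = blockIdx.x * blockDim.y + threadIdx.y) and finds the owning sequence by a linear scan over the offset table. A host-side C++ sketch of just that mapping, under the same assumptions the kernel makes (absolute level-0 offsets, row-major [total_length, seq_width] layout):

#include <cstddef>
#include <vector>

// Returns the index of the sequence owning flattened element `idx`, i.e. the
// first i with idx < lod[i + 1] * seq_width, mirroring the scan inside
// SequenceScaleKernel. `lod` holds absolute offsets, lod.size() == num_seq + 1.
std::size_t OwningSequence(std::size_t idx,
                           const std::vector<std::size_t>& lod,
                           std::size_t seq_width) {
  std::size_t i = 0;
  for (; i + 1 < lod.size(); ++i) {
    if (idx < lod[i + 1] * seq_width) break;
  }
  return i;
}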
paddle/operators/math/sequence_scale.h
new file mode 100644

/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/device_context.h"

namespace paddle {
namespace operators {
namespace math {

/*
 * \brief Scale a sequence.
 *
 * All sequences will be padded to the same length and stored in a transposed
 * shape.
 * Example:
 *    seq     (s0, s0, s0, s0; s1, s1; s2, s2, s2; s3)
 *    padding (s0, s1, s2, s3; s0, s1, s2, 0; s0, 0, s2, 0; s0, 0, 0, 0)
 *
 * \param context       device context of this functor.
 * \param seq           LoDTensor which is stored in sequence format, the shape
 *                      is [total_sequence_length, sequence_width] where
 *                      total_sequence_length is the sum of all sequences'
 *                      length.
 * \param padding       Tensor which is padded to the same length, the shape is
 *                      [max_sequence_length, num_sequences, sequence_width].
 * \param norm_by_times whether dividing sequence's length.
 *
 * \note transposition is also done in this functor.
 */
template <typename DeviceContext, typename T>
class ScaleLoDTensorFunctor {
 public:
  void operator()(const DeviceContext& context, framework::LoDTensor& seq,
                  const T* scales, const size_t num_seq);
};

}  // namespace math
}  // namespace operators
}  // namespace paddle
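A hedged usage sketch of this interface (it compiles only inside the Paddle tree at this revision, and the wrapper name ScaleByLoss is hypothetical): one scale per sequence, applied in place to the level-0 sequences of `seq`.

#include <cstddef>

#include "paddle/framework/lod_tensor.h"
#include "paddle/operators/math/sequence_scale.h"

// Hypothetical helper: scales sequence i of `seq` by scales[i], in place.
// `scales` must hold num_seq values; `seq` must carry a level-0 LoD.
template <typename DeviceContext, typename T>
void ScaleByLoss(const DeviceContext& context,
                 paddle::framework::LoDTensor& seq, const T* scales,
                 const std::size_t num_seq) {
  paddle::operators::math::ScaleLoDTensorFunctor<DeviceContext, T>()(
      context, seq, scales, num_seq);
}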
paddle/operators/warpctc_op.h
@@ -14,9 +14,11 @@ limitations under the License. */
 
 #pragma once
 
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/operators/math/math_function.h"
 #include "paddle/operators/math/sequence_padding.h"
+#include "paddle/operators/math/sequence_scale.h"
 #include "paddle/platform/dynload/warpctc.h"
 
 namespace paddle {
@@ -182,7 +184,6 @@ class WarpCTCKernel : public framework::OpKernel<T> {
     Tensor warpctc_label;
     Copy(*label, platform::CPUPlace(), ctx.device_context(), &warpctc_label);
     const int* warpctc_label_data = warpctc_label.data<int>();
 
     // warpctc stores loss in CPU memory
     Tensor warpctc_loss;
     T* warpctc_loss_data =
@@ -206,11 +207,37 @@ class WarpCTCGradKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* warpctc_grad = ctx.Input<Tensor>("WarpCTCGrad");
     auto* logits_grad =
         ctx.Output<LoDTensor>(framework::GradVarName("Logits"));
+    const Tensor* loss_grad = ctx.Input<Tensor>(framework::GradVarName("Loss"));
+
+    // LOG(ERROR) << "loss_grad_dims: " << loss_grad_dims;
+    // for (int i=0; i<loss_grad->numel();i++) {
+    //  LOG(ERROR) << "loss_grad: " << loss_grad_data[i];
+    //}
+    // T* logits_grad_data =
     logits_grad->mutable_data<T>(ctx.GetPlace());
     bool norm_by_times = ctx.Attr<bool>("norm_by_times");
     math::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
         ctx.template device_context<DeviceContext>(), *logits_grad,
         *warpctc_grad, norm_by_times);
+
+    const T* loss_grad_data = loss_grad->data<T>();
+    const size_t num_seq = loss_grad->dims()[0];
+    math::ScaleLoDTensorFunctor<DeviceContext, T>()(
+        ctx.template device_context<DeviceContext>(), *logits_grad,
+        loss_grad_data, num_seq);
+    /*
+    int level = 0;
+    auto logits_grad_lod = framework::ToAbsOffset(logits_grad->lod());
+    const size_t num_sequences = logits_grad_lod[level].size() - 1;
+    for (int seq_index = 0; seq_index < num_sequences; ++seq_index) {
+      for (int token_index = logits_grad_lod[level][seq_index];
+           token_index < logits_grad_lod[level][seq_index + 1];
+           ++token_index) {
+        logits_grad_data[token_index] *= loss_grad_data[seq_index];
+      }
+    }
+    */
   }
 };
python/paddle/v2/fluid/tests/test_warpctc_op.py
@@ -185,16 +185,14 @@ class TestWarpCTCOp(OpTest):
             "Logits": (logits, logits_lod),
             "Label": (labels, labels_lod)
         }
-        self.outputs = {"Loss": loss}
+        self.outputs = {"Loss": loss, "WarpCTCGrad": gradient}
         self.attrs = {"blank": blank, "norm_by_times": norm_by_times}
 
     def test_check_output(self):
         self.check_output()
 
-    # def test_check_output(self):
-    #    self.check_output()
-    # def test_check_grad(self):
-    #    self.outputs["WarpCTCGrad"] = None
-    #    self.check_grad(["Logits"], "Loss", max_relative_error=0.01)
+    def test_check_grad(self):
+        self.check_grad(["Logits"], "Loss", max_relative_error=0.01)
 
 
 if __name__ == "__main__":
     unittest.main()
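The new test_check_grad relies on OpTest.check_grad, which compares the operator's analytic gradient against a finite-difference estimate and fails if the relative error exceeds max_relative_error. A self-contained C++ sketch of that idea on a toy scalar function (the Paddle harness itself perturbs "Logits" elementwise; the function here is invented):

#include <algorithm>
#include <cmath>
#include <cstdio>

double f(double x) { return x * x * x; }              // stand-in forward op
double analytic_grad(double x) { return 3 * x * x; }  // stand-in backward op

int main() {
  const double x = 0.7, eps = 1e-5;
  // Central difference as a numerical reference for the analytic gradient.
  const double numeric = (f(x + eps) - f(x - eps)) / (2 * eps);
  const double rel_err =
      std::fabs(numeric - analytic_grad(x)) /
      std::max(std::fabs(numeric), std::fabs(analytic_grad(x)));
  std::printf("numeric=%.6f analytic=%.6f rel_err=%.2e\n", numeric,
              analytic_grad(x), rel_err);
  return rel_err < 0.01 ? 0 : 1;  // mirrors max_relative_error=0.01
}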