Commit ce960575 (PaddlePaddle/Paddle)

fix doc format and unit test

Authored on October 23, 2017 by chengduoZH
Parent: 4c19f9f4

Showing 3 changed files with 80 additions and 75 deletions:
paddle/operators/sequence_project_op.cc                 +34 -28
paddle/operators/sequence_project_op.h                   +2 -23
python/paddle/v2/framework/tests/test_seq_project.py    +44 -24
paddle/operators/sequence_project_op.cc
@@ -27,10 +27,12 @@ class SequenceProjectOp : public framework::OperatorWithKernel {
                    "Input(X) of SequenceProjectOp should not be null.");
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of SequenceProjectOp should not be null.");
-    // PaddingData mast be not empty.
+    // PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() >
+    // 0 failed, 0 <= 0)
     PADDLE_ENFORCE(ctx->HasInput("PaddingData"),
-                   "Output(PaddingData) of SequenceProjectOp should not be null.");
+                   "Input(PaddingData) of SequenceProjectOp should not be null.");
     auto in_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(in_dims.size() == 2, "Input(X) should be 2-D tensor.");
@@ -47,7 +49,7 @@ class SequenceProjectOp : public framework::OperatorWithKernel {
     if (context_start == 0 && context_length == 1) {
       PADDLE_THROW(
-          "if context_start == 0 && context_length == 1, padding_trainable "
+          "If context_start is 0 and context_length is 1, padding_trainable "
           "should be false.");
     }
     PADDLE_ENFORCE(padding_dim.size() == 2,
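The constraint above follows from how the kernels size the padding: up_pad = max(0, -context_start) and down_pad = max(0, context_start + context_length - 1) (see sequence_project_op.h below). With context_start = 0 and context_length = 1 both pads are zero, so there are no padding rows for a trainable PaddingData to fill. A small Python check of that arithmetic (variable names mirror the C++ code; this is only an illustration, not part of the commit):

    def pad_sizes(context_start, context_length):
        # Same formulas as the kernels: padding rows needed above / below a sequence.
        up_pad = max(0, -context_start)
        down_pad = max(0, context_start + context_length - 1)
        return up_pad, down_pad

    print(pad_sizes(0, 1))    # (0, 0) -> no padding rows, so padding_trainable must be false
    print(pad_sizes(-1, 3))   # (1, 1) -> one padding row above and one below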
@@ -70,8 +72,8 @@ class SequenceProjectGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Gradient of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"), "The input X should not be null.");
+                   "Gradient of output(Out) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X"), "The input(X) should not be null.");
     if (ctx->Attrs().Get<bool>("padding_trainable") &&
         ctx->HasOutput(framework::GradVarName("PaddingData"))) {
@@ -89,31 +91,35 @@ class SequenceProjectOpMaker : public framework::OpProtoAndCheckerMaker {
   SequenceProjectOpMaker(framework::OpProto* proto,
                          framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X",
-             "A float LoDTensor, the variable-length input of SequenceProjectOp");
-    AddOutput("Out",
-              "A float LoDTensor, the variable-length output of SequenceProjectOp.");
-    AddInput("PaddingData",  // PaddingData can be a float tensor
-             "A float LoDTensor, the padding data of SequenceProjectOp.");
+    AddInput("X",
+             "(A float LoDTensor) the input of SequenceProjectOp, a vector of "
+             "2-D matrix of size (minibatch, number_of_input_features).");
+    AddOutput("Out",
+              "(A float LoDTensor) the output of SequenceProjectOp, a vector "
+              "of 2-D matrix of size (minibatch, number_of_input_features x "
+              "context_length).");
+    AddInput("PaddingData",
+             "(A float LoDTensor) the input of SequenceProjectOp, a vector of "
+             "2-D matrix of size (up_pad + down_pad, "
+             "number_of_input_features). ");
     AddAttr<bool>("padding_trainable",
                   "(bool, default false) the padding data of SequenceProjectOp "
                   "is trainable or not.")
         .SetDefault(false);
     AddAttr<int>("context_length",
-                 "(int, default 3) the stride of SequenceProjectOp.")
+                 "(int, default 3) the context_length of SequenceProjectOp.")
         .SetDefault(3)
         .GreaterThan(0);
     AddAttr<int>("context_start",
-                 "(int, default 0) the xx of SequenceProjectOp.")
+                 "(int, default 0) the context_start of SequenceProjectOp.")
         .SetDefault(0);
     AddAttr<int>("context_stride",
-                 "(int, default 1) the xx of SequenceProjectOp.")
+                 "(int, default 1) the context_stride of SequenceProjectOp. "
+                 "Currently, sequence_project_op only support "
+                 "context_stride=1.")
         .SetDefault(1)
-        .GreaterThan(0);  // Currently, sequence_project_op only support context_stride=1
+        .GreaterThan(0);
     AddComment(R"DOC(
     SequenceProjectOp projects features of context_length time-steps of each instance.
@@ -132,22 +138,22 @@ class SequenceProjectOpMaker : public framework::OpProtoAndCheckerMaker {
     representation is 2.

     - Case1:
-      If we use zero to pad instead of learned weight to pad,
+      If context_start is -1 and padding_trainable is false, we use zero to pad instead of learned weight to pad,
       and the context_lenth is 3, the output (Out) is:

       Out = [0, 0, a1, a2, b1, b2;
              a1, a2, b1, b2, c1, c2;
-             b1, b2, c1, c2, 0, 0;
-             0, 0, d1, d2, 0, 0]
+             b1, b2, c1, c2, 0,  0;
+             0,  0,  d1, d2, 0,  0]

     - Case2:
-//      If we use zero to pad instead of learned weight to pad,
-//      and the context_lenth is 3, the output (Out) is:
-//
-//      Out = [0, 0, a1, a2, b1, b2;
-//             a1, a2, b1, b2, c1, c2;
-//             b1, b2, c1, c2, 0, 0;
-//             0, 0, d1, d2, 0, 0]
+      If context_start is -1 and padding_trainable is true, we use learned weight to pad,
+      and the context_lenth is 3, the output (Out) is:
+
+      Out = [w1, w2, a1, a2, b1, b2;
+             a1, a2, b1, b2, c1, c2;
+             b1, b2, c1, c2, w3, w4;
+             w1, w2, d1, d2, w3, w4]
     )DOC");
   }
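To make Case1 concrete: the pattern of zeros in Out is what you get for an input X = [a; b; c; d] that splits into two sequences {a, b, c} and {d}, with context_start = -1, context_length = 3 and zero padding. The numpy sketch below is my own reference implementation of the projection rule described in this DOC block, not the operator's code; the concrete numbers stand in for a1 ... d2 and the LoD [0, 3, 4] is inferred from the documented output:

    import numpy as np

    x = np.array([[1., 2.],   # a1, a2
                  [3., 4.],   # b1, b2
                  [5., 6.],   # c1, c2
                  [7., 8.]])  # d1, d2
    lod = [0, 3, 4]           # two sequences: rows 0-2 and row 3
    context_start, context_length = -1, 3

    rows, width = x.shape
    out = np.zeros((rows, context_length * width))
    for seq in range(len(lod) - 1):
        begin, end = lod[seq], lod[seq + 1]
        for t in range(begin, end):
            for j in range(context_length):
                src = t + context_start + j
                if begin <= src < end:       # rows outside the sequence stay zero (the padding)
                    out[t, j * width:(j + 1) * width] = x[src]

    print(out)
    # row 0: [0, 0, a1, a2, b1, b2]
    # row 1: [a1, a2, b1, b2, c1, c2]
    # row 2: [b1, b2, c1, c2, 0, 0]
    # row 3: [0, 0, d1, d2, 0, 0]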
paddle/operators/sequence_project_op.h
@@ -55,26 +55,17 @@ class SequenceProjectKernel : public framework::OpKernel<T> {
     PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
                       "Only support one level sequence now.");
     auto lod_level_0 = in->lod()[0];
     int64_t input_width = in->dims()[1];
     int64_t output_width = out->dims()[1];
     int64_t padding_width = 0;
     PADDLE_ENFORCE(input_width * context_length == output_width,
                    "Input size and pooling size should be consistent.");

     const LoDTensor* padding_data = nullptr;
     if (padding_trainable) {
       padding_data = context.Input<LoDTensor>("PaddingData");
       PADDLE_ENFORCE_EQ(padding_data->dims().size(), 2UL,
                         "Only support one level sequence now.");
       padding_width = padding_data->dims()[1];
       PADDLE_ENFORCE(padding_width == input_width,
                      "Input size and pooling size should be consistent.");
     }

     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
     int sequence_height, sequence_width;
     int input_row_begin, input_row_end;

     sequence_width = static_cast<int>(in->dims()[1]);

     paddle::operators::math::Im2ColFunctor<
         paddle::operators::math::ColFormat::kOCF, Place, float>
@@ -90,7 +81,6 @@ class SequenceProjectKernel : public framework::OpKernel<T> {
                                     static_cast<int>(lod_level_0[i + 1]));
       sequence_height = static_cast<int>(out_t.dims()[0]);
-      sequence_width = static_cast<int>(in->dims()[1]);
       std::vector<int64_t> output_shape({sequence_height, 1, 1, context_length,
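For orientation: the forward kernel treats one sequence as a sequence_height x sequence_width block and expands it im2col-style so that every time step carries context_length stacked rows. The enforce shown above states the resulting width relation, output_width == input_width * context_length, and the col buffer is laid out as {sequence_height, 1, 1, context_length, sequence_width}. A tiny numpy sanity check of that bookkeeping, with hypothetical sizes of my choosing (not taken from the tests):

    import numpy as np

    sequence_height, sequence_width, context_length = 4, 23, 3   # hypothetical sizes

    # im2col-style buffer for one sequence, then flattened to one output row per time step
    col = np.zeros((sequence_height, 1, 1, context_length, sequence_width))
    out_rows = col.reshape(sequence_height, context_length * sequence_width)

    # i.e. output_width == input_width * context_length, as the kernel enforces
    assert out_rows.shape == (sequence_height, context_length * sequence_width)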
@@ -190,13 +180,6 @@ class SequenceProjectGradKernel : public framework::OpKernel<T> {
                       "Only support one level sequence now.");
     auto lod_g_level_0 = in->lod()[0];
     int64_t input_width = in->dims()[1];
     int64_t output_width = out_g->dims()[1];
     int64_t padding_width = 0;
     PADDLE_ENFORCE(input_width * context_length == output_width,
                    "Input size and pooling size should be consistent.");

     int up_pad = std::max(0, -context_start);
     int down_pad = std::max(0, context_start + context_length - 1);
     int sequence_height, sequence_width;
@@ -250,11 +233,7 @@ class SequenceProjectGradKernel : public framework::OpKernel<T> {
     if (padding_trainable && padding_data_g) {
       padding_data_g->mutable_data<T>(context.GetPlace());
       PADDLE_ENFORCE_EQ(padding_data_g->dims().size(), 2UL,
                         "Only support one level sequence now.");
       padding_width = padding_data_g->dims()[1];
       PADDLE_ENFORCE(padding_width == input_width,
                      "Input size and pooling size should be consistent.");
       math::SetConstant<Place, T> functor;
       functor(context.device_context(), padding_data_g, 0);
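The SetConstant call above zero-fills the PaddingData gradient buffer before the backward pass writes into it; since the same learned padding rows sit at the boundary of every sequence, their gradient is presumably accumulated over all of those uses rather than overwritten. A rough numpy picture of that zero-then-accumulate pattern (my own illustration with made-up sizes, not the kernel's code):

    import numpy as np

    total_pad, input_width = 2, 23                         # hypothetical sizes
    padding_data_g = np.zeros((total_pad, input_width))    # the SetConstant(..., 0) step

    # each sequence that touched the padding rows contributes a slice of the output gradient
    contributions = [np.ones((total_pad, input_width)), 2 * np.ones((total_pad, input_width))]
    for grad_slice in contributions:
        padding_data_g += grad_slice                       # accumulate, do not overwrite

    assert padding_data_g.max() == 3.0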
python/paddle/v2/framework/tests/test_seq_project.py
@@ -8,6 +8,10 @@ class TestSeqProject(OpTest):
     def setUp(self):
         self.init_test_case()
         self.op_type = 'sequence_project'
+        if self.context_length == 1 and self.context_start == 0 and self.padding_trainable:
+            print "If context_start is 0 and context_length is 1, padding_trainable should be false."
+            return
         # one level, batch size
         x = np.random.uniform(0.1, 1, [self.input_size[0], self.input_size[1]]).astype('float32')
@@ -15,11 +19,15 @@ class TestSeqProject(OpTest):
         self.begin_pad = np.max([0, -self.context_start])
         self.end_pad = np.max([0, self.context_start + self.context_length - 1])
         self.total_pad = self.begin_pad + self.end_pad
-        w = np.random.uniform(
+        if self.total_pad == 0:
+            self.total_pad = 1
+        # PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
+        padding_data = np.random.uniform(
+            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
         self.inputs = {
             'X': (x, self.lod),
-            'PaddingData': (w, [[0, self.total_pad]])
+            'PaddingData': (padding_data, [[0, self.total_pad]])
         }
         self.attrs = {
             'context_start': self.context_start,
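The workaround above exists because SequenceProjectOp now unconditionally declares PaddingData as an input and its InferShape rejects an empty tensor (the numel() > 0 enforce quoted in the comment). When the configuration needs no padding at all, the test therefore still feeds a one-row dummy that the reference computation never reads. Condensed, with hypothetical standalone names in place of the test's attributes:

    import numpy as np

    input_width = 23
    total_pad = 0                  # e.g. context_start = 0, context_length = 1
    if total_pad == 0:
        total_pad = 1              # keep PaddingData non-empty for the numel() > 0 check
    padding_data = np.random.uniform(0.1, 1, [total_pad, input_width]).astype('float32')
    assert padding_data.size > 0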
@@ -34,7 +42,7 @@ class TestSeqProject(OpTest):
     def compute(self):
         x, lod = self.inputs['X']
-        w, _ = self.inputs['PaddingData']
+        pading_data, _ = self.inputs['PaddingData']
         out = self.outputs['Out']
         lod = lod[0]
         begin_pad = np.max([0, -self.context_start])
@@ -48,7 +56,7 @@ class TestSeqProject(OpTest):
             if in_begin < lod[i]:
                 pad_size = np.min([lod[i] - in_begin, lod[i + 1] - lod[i]])
                 if self.padding_trainable:
-                    sub_w = w[j:j + pad_size, :]
+                    sub_w = pading_data[j:j + pad_size, :]
                     out[lod[i]:lod[i] + pad_size, j * self.input_size[1]:(j + 1) * self.input_size[1]] = sub_w
                 out_begin = lod[i] + pad_size
@@ -58,8 +66,9 @@ class TestSeqProject(OpTest):
                 pad_size = np.min([in_end - lod[i + 1], lod[i + 1] - lod[i]])
                 if self.padding_trainable:
-                    sub_w = w[begin_pad + self.context_start + j - pad_size:begin_pad + self.context_start + j, :]
+                    sub_w = pading_data[begin_pad + self.context_start + j - pad_size:
+                                        begin_pad + self.context_start + j, :]
                     out[lod[i + 1] - pad_size:lod[i + 1], j * self.input_size[1]:(j + 1) * self.input_size[1]] = sub_w
                 in_end = lod[i + 1]
@@ -75,8 +84,9 @@ class TestSeqProject(OpTest):
         self.check_output()

     def test_check_grad(self):
-        self.check_grad(
-            set(['X', 'PaddingData']), 'Out', max_relative_error=0.05)
+        if self.padding_trainable:
+            self.check_grad(
+                set(['X', 'PaddingData']), 'Out', max_relative_error=0.05)

     def test_check_grad_no_filter(self):
         self.check_grad(
@@ -86,12 +96,26 @@ class TestSeqProject(OpTest):
             no_grad_set=set(['PaddingData']))

     def test_check_grad_no_input(self):
-        self.check_grad(
-            ['PaddingData'], 'Out', max_relative_error=0.05, no_grad_set=set(['X']))
+        if self.padding_trainable:
+            self.check_grad(
+                ['PaddingData'], 'Out', max_relative_error=0.05, no_grad_set=set(['X']))

     def init_test_case(self):
         self.op_type = "sequence_project"
         self.input_row = 11
         self.context_start = 0
         self.context_length = 1
         self.padding_trainable = False
         self.context_stride = 1

         self.input_size = [self.input_row, 23]
         self.lod = [[0, 4, 5, 8, self.input_row]]


 class TestSeqProjectCase1(TestSeqProject):
     def init_test_case(self):
         self.op_type = "sequence_project"
         self.input_row = 11
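The base configuration above (context_start = 0, context_length = 1, padding_trainable = False) is the degenerate window: each time step gathers only its own row and no padding rows exist, so under the projection rule documented in sequence_project_op.cc the expected output is simply the input itself. A quick, self-contained numpy statement of that expectation (my illustration, not part of the test):

    import numpy as np

    # Degenerate window: context_start = 0, context_length = 1, no padding needed.
    x = np.random.uniform(0.1, 1, [11, 23]).astype('float32')
    context_start, context_length = 0, 1

    out = np.zeros((x.shape[0], context_length * x.shape[1]), dtype=x.dtype)
    for t in range(x.shape[0]):
        for j in range(context_length):
            src = t + context_start + j          # src == t, always inside the sequence
            out[t, j * x.shape[1]:(j + 1) * x.shape[1]] = x[src]

    assert np.array_equal(out, x)                # the projection reduces to the identity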
@@ -104,7 +128,7 @@ class TestSeqProject(OpTest):
         self.lod = [[0, 4, 5, 8, self.input_row]]


-class TestSeqProjectCase1(TestSeqProject):
+class TestSeqProjectCase2(TestSeqProject):
     def init_test_case(self):
         self.op_type = "sequence_project"
         self.input_row = 25
@@ -151,21 +175,17 @@ class TestSeqProjectCases(TestSeqProject):
         ]
         self.begin_pad = np.max([0, -self.context_start])
-        self.end_pad = np.max(
-            [0, self.context_start + self.context_length - 1])
+        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
         self.total_pad = self.begin_pad + self.end_pad
-        # w = np.ones((self.total_pad, self.input_size[1])) * 100
-        w = np.array(range(self.total_pad * self.input_size[1]))
-        w.shape = self.total_pad, self.input_size[1]
-        if self.total_pad * self.input_size[1] == 0:
-            w = np.random.uniform(
-                0.1, 1,
-                (1, self.input_size[1])).astype('float32')
+        if self.total_pad == 0:
+            self.total_pad = 1
+        # PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
+        padding_data = np.random.uniform(
+            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
         self.inputs = {
             'X': (x, self.lod),
-            'PaddingData': (w, [[0, self.total_pad]])
+            'PaddingData': (padding_data, [[0, self.total_pad]])
         }
         self.attrs = {
             'context_start': self.context_start,