Commit 154dbb46
Authored Oct 23, 2017 by chengduoZH

Add unit test

Parent: f2ccef26
Showing 4 changed files with 243 additions and 3 deletions (+243 −3)

paddle/operators/math/CMakeLists.txt  (+2, −2)
paddle/operators/math/sequence_project.h  (+1, −1)
paddle/operators/sequence_conv_op.h  (+1, −0)
python/paddle/v2/framework/tests/test_seq_conv.py  (+239, −0)
paddle/operators/math/CMakeLists.txt
@@ -7,7 +7,7 @@ if(WITH_GPU)
     nv_library(cross_entropy SRCS cross_entropy.cc cross_entropy.cu DEPS operator)
     nv_library(pooling SRCS pooling.cc pooling.cu DEPS device_context)
     nv_library(vol2col SRCS vol2col.cc vol2col.cu DEPS device_context)
-    nv_library(sequence_project SRCS sequence_project.cc sequence_project.cu DEPS device_context)
+    nv_library(sequence_project SRCS sequence_project.cc sequence_project.cu DEPS device_context math_function)
 else()
     cc_library(math_function SRCS math_function.cc im2col.cc DEPS cblas device_context operator)
     cc_library(selected_rows_functor SRCS selected_rows_functor.cc DEPS selected_rows math_function)
@@ -15,7 +15,7 @@ else()
     cc_library(cross_entropy SRCS cross_entropy.cc DEPS operator)
     cc_library(pooling SRCS pooling.cc DEPS device_context)
     cc_library(vol2col SRCS vol2col.cc DEPS device_context)
-    nv_library(sequence_project SRCS sequence_project.cc DEPS device_context)
+    cc_library(sequence_project SRCS sequence_project.cc DEPS device_context math_function)
 endif()
 cc_test(math_function_test SRCS math_function_test.cc DEPS math_function tensor)
paddle/operators/math/sequence_project.h
@@ -69,7 +69,7 @@ template <typename Place, typename T>
 class SequenceProjectFunctor {
  public:
   void operator()(const platform::DeviceContext& context,
-                  const framework::LoDTensor*& in,
+                  const framework::LoDTensor* in,
                   const framework::LoDTensor* padding_data,
                   framework::LoDTensor* col, bool padding_trainable,
                   int context_start, int context_length, int context_stride,
paddle/operators/sequence_conv_op.h
@@ -125,6 +125,7 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
       auto temp = framework::EigenVector<T>::Flatten(col);
       temp.device(context.GetEigenDevice<Place>()) =
           temp.constant(static_cast<T>(0));
       math::matmul<Place, T>(context.device_context(), *out_g, false, *filter,
                              true, T(1.0), &col, T(1.0));
     }
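For reference, the math::matmul call in this hunk accumulates the column-buffer gradient as col = 1.0 * out_g * Filterᵀ + 1.0 * col (second operand transposed, alpha = beta = 1). Below is a rough numpy sketch of that same update, with hypothetical shapes chosen only so the example runs; it is an illustration, not the operator's implementation.

import numpy as np

N, K = 11, 69                                    # hypothetical: rows of Out, width of the col buffer
out_g = np.random.rand(N, 1).astype('float32')   # gradient w.r.t. Out
filter = np.random.rand(K, 1).astype('float32')  # Filter, viewed as a single column
col = np.zeros((N, K), dtype='float32')          # im2col-style buffer being accumulated into

# matmul(ctx, *out_g, false, *filter, true, T(1.0), &col, T(1.0)) roughly computes:
col = out_g.dot(filter.T) + col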
python/paddle/v2/framework/tests/test_seq_conv.py
0 → 100644
import unittest
import numpy as np
import random
from op_test import OpTest


class TestSeqProject(OpTest):
    def setUp(self):
        self.init_test_case()
        self.op_type = 'sequence_conv'

        if self.context_length == 1 \
                and self.context_start == 0 \
                and self.padding_trainable:
            print "If context_start is 0 " \
                  "and context_length is 1," \
                  " padding_trainable should be false."
            return

        # one level, batch size
        x = np.random.uniform(0.1, 1, [self.input_size[0],
                                       self.input_size[1]]).astype('float32')

        self.begin_pad = np.max([0, -self.context_start])
        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
        self.total_pad = self.begin_pad + self.end_pad
        if self.total_pad == 0:
            self.total_pad = 1

        # PaddingData mast be not empty.
        # Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
        padding_data = np.random.uniform(
            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')
        w = np.random.uniform(
            0.1, 1, [self.context_length, self.input_size[1]]).astype('float32')

        self.inputs = {
            'X': (x, self.lod),
            'PaddingData': (padding_data, [[0, self.total_pad]]),
            'Filter': (w, [[0, self.context_length]])
        }
        self.attrs = {
            'context_start': self.context_start,
            'context_length': self.context_length,
            'padding_trainable': self.padding_trainable,
            'context_stride': self.context_stride
        }
        out = np.zeros((self.input_size[0], 1)).astype('float32')
        self.outputs = {'Out': out}
        self.compute()

    def compute(self):
        x, lod = self.inputs['X']
        filter = self.inputs['Filter']
        pading_data, _ = self.inputs['PaddingData']
        out = np.zeros((self.input_size[0],
                        self.context_length * self.input_size[1])).astype('float32')
        lod = lod[0]
        begin_pad = np.max([0, -self.context_start])

        for i in range(len(lod) - 1):
            for j in range(self.context_length):
                in_begin = lod[i] + self.context_start + j
                in_end = lod[i + 1] + self.context_start + j
                out_begin = lod[i]
                out_end = lod[i + 1]
                if in_begin < lod[i]:
                    pad_size = np.min([lod[i] - in_begin, lod[i + 1] - lod[i]])
                    if self.padding_trainable:
                        sub_w = pading_data[j:j + pad_size, :]
                        out[lod[i]:lod[i] + pad_size,
                            j * self.input_size[1]:(j + 1) * self.input_size[1]] = sub_w
                    out_begin = lod[i] + pad_size
                    in_begin = lod[i]

                if in_end > lod[i + 1]:
                    pad_size = np.min(
                        [in_end - lod[i + 1], lod[i + 1] - lod[i]])
                    if self.padding_trainable:
                        sub_w = pading_data[begin_pad + self.context_start + j - pad_size:
                                            begin_pad + self.context_start + j, :]
                        out[lod[i + 1] - pad_size:lod[i + 1],
                            j * self.input_size[1]:(j + 1) * self.input_size[1]] = sub_w
                    in_end = lod[i + 1]
                    out_end = lod[i + 1] - pad_size
                if in_end <= in_begin:
                    continue

                in_sub = x[in_begin:in_end, :]
                out[out_begin:out_end,
                    j * self.input_size[1]:(j + 1) * self.input_size[1]] += in_sub

        filter_dim = filter[0].shape
        output_dim = self.outputs['Out'].shape
        filter[0].shape = filter_dim[0] * filter_dim[1]
        self.outputs['Out'].shape = (output_dim[0], )
        np.dot(out, filter[0], out=self.outputs['Out'])
        filter[0].shape = filter_dim
        self.outputs['Out'].shape = output_dim

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.padding_trainable:
            self.check_grad(
                set(['X', 'PaddingData', 'Filter']),
                'Out',
                max_relative_error=0.05)

    def test_check_grad_input(self):
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.05,
            no_grad_set=set(['PaddingData', 'Filter']))

    def test_check_grad_padding_data(self):
        if self.padding_trainable:
            self.check_grad(
                ['PaddingData'],
                'Out',
                max_relative_error=0.05,
                no_grad_set=set(['X', 'Filter']))

    def test_check_grad_Filter(self):
        self.check_grad(
            ['Filter'],
            'Out',
            max_relative_error=0.05,
            no_grad_set=set(['X', 'PaddingData']))

    def init_test_case(self):
        self.op_type = "sequence_project"
        self.input_row = 11
        self.context_start = 0
        self.context_length = 1
        self.padding_trainable = False
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        self.lod = [[0, 4, 5, 8, self.input_row]]


class TestSeqProjectCase1(TestSeqProject):
    def init_test_case(self):
        self.op_type = "sequence_project"
        self.input_row = 11
        self.context_start = -1
        self.context_length = 3
        self.padding_trainable = True
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        self.lod = [[0, 4, 5, 8, self.input_row]]


class TestSeqProjectCase2(TestSeqProject):
    def init_test_case(self):
        self.op_type = "sequence_project"
        self.input_row = 25
        self.context_start = 2
        self.context_length = 3
        self.padding_trainable = True
        self.context_stride = 1

        self.input_size = [self.input_row, 23]
        idx = range(self.input_size[0])
        del idx[0]
        self.lod = [[0] + np.sort(random.sample(idx, 8)).tolist() +
                    [self.input_size[0]]]

'''
class TestSeqProjectCases(TestSeqProject):
    def setUp(self):
        self.init_test_case()
        self.op_type = 'sequence_project'

        num = 0
        for context_start in [-5, -3, -1, 0, 3]:
            for context_length in [1, 2, 5, 7]:
                for batch_size in [1, 2, 5, 7]:
                    for padding_trainable in [False, True]:

                        if context_length == 1 and context_start == 0 and padding_trainable:
                            continue

                        self.context_start = context_start
                        self.context_length = context_length
                        self.padding_trainable = padding_trainable
                        self.input_size = [batch_size, 23]
                        x = np.random.uniform(0.1, 1,
                                              self.input_size).astype('float32')
                        self.lod = [[0, self.input_size[0]]]
                        if self.input_size[0] > 2:
                            idx = range(self.input_size[0])
                            del idx[0]
                            self.lod = [
                                [0] + np.sort(random.sample(idx, 2)).tolist() +
                                [self.input_size[0]]
                            ]

                        self.begin_pad = np.max([0, -self.context_start])
                        self.end_pad = np.max([0, self.context_start + self.context_length - 1])
                        self.total_pad = self.begin_pad + self.end_pad
                        if self.total_pad == 0:
                            self.total_pad = 1
                        # PaddingData mast be not empty. Otherwise(EnforceNotMet: enforce numel() > 0 failed, 0 <= 0)
                        padding_data = np.random.uniform(
                            0.1, 1, [self.total_pad, self.input_size[1]]).astype('float32')

                        self.inputs = {
                            'X': (x, self.lod),
                            'PaddingData': (padding_data, [[0, self.total_pad]])
                        }
                        self.attrs = {
                            'context_start': self.context_start,
                            'context_length': self.context_length,
                            'padding_trainable': self.padding_trainable,
                            'context_stride': self.context_stride
                        }
                        out = np.zeros((self.input_size[0], self.input_size[1] *
                                        self.context_length)).astype('float32')
                        self.outputs = {'Out': out}

                        print num
                        print self.attrs
                        print batch_size
                        print padding_trainable
                        print "$$$$$$$$$$$$$"

                        self.compute()
                        self.test_check_output()

                        num += 1
'''
if __name__ == '__main__':
    unittest.main()
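A note on the lod values used above: a level-0 LoD such as [[0, 4, 5, 8, self.input_row]] stores sequence boundaries as offsets into the rows of X, so consecutive offsets delimit the individual sequences in the mini-batch. The standalone numpy snippet below illustrates this for the default test case; it is not part of the committed test.

import numpy as np

# Level-0 LoD from the default test case: offsets into the 11 input rows.
lod0 = [0, 4, 5, 8, 11]
x = np.arange(11)  # stand-in for the 11 rows of X

# Consecutive offsets delimit one sequence each: lengths 4, 1, 3 and 3.
sequences = [x[lod0[i]:lod0[i + 1]] for i in range(len(lod0) - 1)]
print [len(s) for s in sequences]  # -> [4, 1, 3, 3]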