Commit b65722d3
Repository: Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle)
Authored on Dec 01, 2018 by phlrain
Parent: 2770ea1a

fix uni test; test=develop
Showing 4 changed files with 54 additions and 27 deletions:

paddle/fluid/operators/cudnn_lstm_op.cu.cc                  +0  -6
python/paddle/fluid/layers/nn.py                            +6  -7
python/paddle/fluid/tests/unittests/op_test.py              +9  -0
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py   +39 -14
paddle/fluid/operators/cudnn_lstm_op.cu.cc

```diff
@@ -279,12 +279,6 @@ class CudnnLSTMGPUKernel : public framework::OpKernel<T> {
     int num_layers = ctx.Attr<int>("num_layers");
     bool is_test = ctx.Attr<bool>("is_test");

-    /*
-    if (is_test) {
-      TensorCopy(*x, ctx.GetPlace(), out);
-      return;
-    }*/
-
     auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto handle = dev_ctx.cudnn_handle();

     auto *cache_var = ctx.InputVar("Cache");
```
python/paddle/fluid/layers/nn.py

```diff
@@ -477,12 +477,10 @@ def lstm(input,
          init_h,
          init_c,
          max_len,
-         dropout_prob,
-         input_size,
          hidden_size,
          num_layers,
+         dropout_prob=0.0,
          is_bidirec=False,
          dtype='float32',
          is_test=False,
          name=None,
          default_initializer=None,
@@ -531,13 +529,11 @@ def lstm(input,
             This is a tensor with shape ( num_layers x batch_size x hidden_size )
             if is_bidirec = True, shape should be ( num_layers*2 x batch_size x hidden_size)
         max_len (int): max length of LSTM. the first dim of input tensor CAN NOT greater than max_len
-        dropout_prob(float): dropout prob, dropout ONLY work between rnn layers, NOT between time steps
-              There is NO dropout work on rnn output of the last RNN layers
-        input_size (int): hidden size of the input tensor
         hidden_size (int): hidden size of the LSTM
         num_layers (int): total layers number of the LSTM
+        dropout_prob(float|0.0): dropout prob, dropout ONLY work between rnn layers, NOT between time steps
+              There is NO dropout work on rnn output of the last RNN layers
         is_bidirec (bool): If it is bidirectional
         dtype (str): Data type. Choices = ["float32", "float64"], default "float32".
         is_test (bool): If it is in test phrase
         name (str|None): A name for this layer(optional). If set None, the layer
             will be named automatically.
@@ -577,6 +573,9 @@ def lstm(input,
     helper = LayerHelper('cudnn_lstm', **locals())
+    dtype = input.dtype
+    input_shape = list(input.shape)
+    input_size = input_shape[-1]
     weight_size = 0
     for i in range(num_layers):
         if i == 0:
```
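After this change, `input_size` is inferred from the last dimension of `input`, and `dropout_prob` moves to a keyword argument with a 0.0 default. A minimal call sketch under the new signature; the sizes and variable names are illustrative, not from this commit, and it assumes the layer returns the op's three outputs (`Out`, `last_h`, `last_c`):

```python
import paddle.fluid as fluid

# Illustrative sizes, not taken from the commit.
max_len, batch_size, emb_dim, hidden_size, num_layers = 100, 32, 256, 512, 2

# Input is (seq_len, batch, input_size); input_size is now read from shape[-1].
data = fluid.layers.data(
    name='x', shape=[max_len, batch_size, emb_dim],
    dtype='float32', append_batch_size=False)
init_h = fluid.layers.fill_constant(
    [num_layers, batch_size, hidden_size], 'float32', 0.0)
init_c = fluid.layers.fill_constant(
    [num_layers, batch_size, hidden_size], 'float32', 0.0)

# input_size is no longer passed; dropout_prob defaults to 0.0 and applies
# only between stacked RNN layers, not between time steps.
out, last_h, last_c = fluid.layers.lstm(
    data, init_h, init_c, max_len, hidden_size, num_layers,
    dropout_prob=0.2)
```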
python/paddle/fluid/tests/unittests/op_test.py

```diff
@@ -216,6 +216,15 @@ class OpTest(unittest.TestCase):
                                          self.dtype)
         outputs = append_input_output(block, op_proto, self.outputs, False,
                                       self.dtype)
+
+        if hasattr(self, "cache_name_list"):
+            for name in self.cache_name_list:
+                inputs[name] = block.create_var(
+                    name=name,
+                    persistable=True,
+                    type=core.VarDesc.VarType.RAW,
+                    stop_gradient=True)
+
         op = block.append_op(
             type=self.op_type,
             inputs=inputs,
```
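This hook lets an operator test declare opaque cache variables that cannot be fed as numpy arrays: every name listed in `cache_name_list` gets a persistable RAW variable in the test block instead. A minimal, hypothetical subclass sketch (class name and elided fields are illustrative):

```python
# Hypothetical OpTest subclass: listing names in cache_name_list makes OpTest
# create a persistable core.VarDesc.VarType.RAW variable for each of them,
# rather than expecting a numpy array in self.inputs.
class TestOpWithCache(OpTest):
    def setUp(self):
        self.op_type = "cudnn_lstm"       # an op that carries an opaque Cache input
        self.inputs = {}                  # numeric inputs elided in this sketch
        self.cache_name_list = ['Cache']  # picked up via hasattr() in OpTest
        self.attrs = {}
        self.outputs = {}
```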
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py

```diff
@@ -19,6 +19,11 @@ import numpy as np
 import paddle.fluid.core as core
 from op_test import OpTest
+import paddle.fluid as fluid
+
+SIGMOID_THRESHOLD_MIN = -40.0
+SIGMOID_THRESHOLD_MAX = 13.0
+EXP_MAX_INPUT = 40.0


 def lstm_naive(
```
```diff
@@ -70,10 +75,15 @@ def lstm_naive(
     bo_2 = w[offset:offset + hidden_size]

     def sigmoid(x):
-        return 1.0 / (1.0 + np.exp(-x))
+        y = np.copy(x)
+        y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
+        y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
+        return 1. / (1. + np.exp(-y))

     def tanh(x):
-        return (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
+        y = -2. * x
+        y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
+        return (2. / (1. + np.exp(y))) - 1.

     output = []
     pre_h = np.zeros((batch_size, hidden_size), dtype=input.dtype)
```
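The rewritten `sigmoid`/`tanh` clamp their arguments so `np.exp` never overflows for large-magnitude inputs; the new `tanh` relies on the identity tanh(x) = 2*sigmoid(2*x) - 1. A quick standalone check of the idea; the constants are copied from the diff, the demo itself is illustrative:

```python
import numpy as np

SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0

def sigmoid(x):
    # Clip the argument so np.exp never sees a huge magnitude.
    y = np.copy(x)
    y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
    y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
    return 1. / (1. + np.exp(-y))

def tanh(x):
    # tanh(x) = 2*sigmoid(2*x) - 1; clipping -2x bounds the exp argument.
    y = -2. * x
    y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
    return (2. / (1. + np.exp(y))) - 1.

x = np.array([-1e4, 0.0, 1e4], dtype=np.float32)
print(sigmoid(x))  # saturates to approx [0, 0.5, 1] with no overflow warning
print(tanh(x))     # saturates to approx [-1, 0, 1] with no overflow warning
```

The naive forms would compute `np.exp(1e4)` and overflow to `inf`, which is exactly the instability the unit-test fix works around.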
```diff
@@ -103,7 +113,7 @@ def lstm_naive(
     output = output.transpose((1, 0, 2))

-    return output
+    return output, pre_h, pre_c


 class TestCUDNNLstmOp(OpTest):
```
```diff
@@ -120,20 +130,32 @@ class TestCUDNNLstmOp(OpTest):
         weight_size = input_weight_size + hidden_weight_size
         weight_size += hidden_size * 8

-        input = np.random.random(
-            (num_steps, batch_size, hidden_size)).astype(self.dtype)
-        flat_w = np.random.random((weight_size)).astype(self.dtype)
+        input = np.random.uniform(
+            low=-0.1, high=0.1, size=(num_steps, batch_size,
+                                      hidden_size)).astype(self.dtype)
+        flat_w = np.random.uniform(
+            low=-0.1, high=0.1, size=(weight_size)).astype(self.dtype)

-        output = lstm_naive(input, flat_w)
+        output, last_hidden, last_cell = lstm_naive(input, flat_w)

         init_h = np.zeros((batch_size, hidden_size), dtype=np.float32)
         init_c = np.zeros((batch_size, hidden_size), dtype=np.float32)
+        scope = core.Scope()
+        program = fluid.Program()
+        block = program.global_block()

+        cache_temp = block.create_var(
+            name="Cache",
+            persistable=True,
+            type=core.VarDesc.VarType.RAW,
+            stop_gradient=True)
         self.inputs = {
             'Input': OpTest.np_dtype_to_fluid_dtype(input),
             'W': OpTest.np_dtype_to_fluid_dtype(flat_w),
             'InitH': OpTest.np_dtype_to_fluid_dtype(init_h),
             'InitC': OpTest.np_dtype_to_fluid_dtype(init_c),
         }
+        self.cache_name_list = ['Cache']
         self.attrs = {
             'max_len': num_steps,
             'dropout_prob': 0.0,
```
```diff
@@ -142,13 +164,16 @@ class TestCUDNNLstmOp(OpTest):
             'hidden_size': hidden_size,
             'num_layers': 1,
         }
-        self.outputs = {'Out': output}
-
-    def test_grad_with_place(self):
-        place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, atol=1e-5)
+        self.outputs = {
+            'Out': output,
+            "last_h": last_hidden,
+            'last_c': last_cell
+        }

     def test_output_with_place(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place, atol=1e-5)
+        self.check_output_with_place(
+            place, atol=1e-5, no_check_set=['last_h', 'last_c'])


 if __name__ == '__main__':
     unittest.main()
```