PaddlePaddle / DeepSpeech
Commit b86bff11
Authored on Oct 28, 2019 by lfchener
Change StaticRNN to fluid.layers.rnn
Parent: 5834f66e

Showing 1 changed file with 86 additions and 57 deletions (+86 −57)

model_utils/network.py
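This commit replaces the hand-rolled simple_rnn helper, built on the imperative fluid.layers.StaticRNN loop, with an RNNCell subclass driven by the declarative fluid.layers.rnn API. The pad/unpad handling of the variable-length LoDTensor input is kept; what moves is the per-timestep recurrence (fc projection of the previous state, residual add of the input, bounded ReLU), which now lives in RNNCell.call, while sequence reversal is delegated to fluid.layers.rnn through is_reverse and sequence_length instead of explicit sequence_reverse calls.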
@@ -59,50 +59,53 @@ def conv_bn_layer(input, filter_size, num_channels_in, num_channels_out, stride,
     return padding_reset


-def simple_rnn(input, size, param_attr=None, bias_attr=None, is_reverse=False):
-    '''A simple rnn layer.
-
-    :param input: input layer.
-    :type input: Variable
-    :param size: Dimension of RNN cells.
-    :type size: int
+class RNNCell(fluid.layers.RNNCell):
+    '''A simple rnn cell.
+
+    :param hidden_size: Dimension of RNN cells.
+    :type hidden_size: int
     :param param_attr: Parameter properties of hidden layer weights that
                        can be learned
     :type param_attr: ParamAttr
     :param bias_attr: Bias properties of hidden layer weights that can be learned
     :type bias_attr: ParamAttr
-    :param is_reverse: Whether to calculate the inverse RNN
-    :type is_reverse: bool
-    :return: A simple RNN layer.
-    :rtype: Variable
+    :param hidden_activation: Activation for hidden cell
+    :type hidden_activation: Activation
+    :param activation: Activation for output
+    :type activation: Activation
+    :param name: Name of cell
+    :type name: string
     '''
-    if is_reverse:
-        input = fluid.layers.sequence_reverse(x=input)
-    pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
-    input, length = fluid.layers.sequence_pad(input, pad_value)
-    rnn = fluid.layers.StaticRNN()
-    input = fluid.layers.transpose(input, [1, 0, 2])
-    with rnn.step():
-        in_ = rnn.step_input(input)
-        mem = rnn.memory(shape=[-1, size], batch_ref=in_)
-        out = fluid.layers.fc(
-            input=mem,
-            size=size,
-            act=None,
-            param_attr=param_attr,
-            bias_attr=bias_attr)
-        out = fluid.layers.elementwise_add(out, in_)
-        out = fluid.layers.brelu(out)
-        rnn.update_memory(mem, out)
-        rnn.output(out)
-    out = rnn()
-    out = fluid.layers.transpose(out, [1, 0, 2])
-    out = fluid.layers.sequence_unpad(x=out, length=length)
-    if is_reverse:
-        out = fluid.layers.sequence_reverse(x=out)
-    return out
+
+    def __init__(self,
+                 hidden_size,
+                 param_attr=None,
+                 bias_attr=None,
+                 hidden_activation=None,
+                 activation=None,
+                 dtype="float32",
+                 name="RNNCell"):
+        self.hidden_size = hidden_size
+        self.param_attr = param_attr
+        self.bias_attr = bias_attr
+        self.hidden_activation = hidden_activation
+        self.activation = activation or fluid.layers.brelu
+        self.name = name
+
+    def call(self, inputs, states):
+        new_hidden = fluid.layers.fc(
+            input=states,
+            size=self.hidden_size,
+            act=self.hidden_activation,
+            param_attr=self.param_attr,
+            bias_attr=self.bias_attr)
+        new_hidden = fluid.layers.elementwise_add(new_hidden, inputs)
+        new_hidden = self.activation(new_hidden)
+        return new_hidden, new_hidden
+
+    @property
+    def state_shape(self):
+        return [self.hidden_size]


 def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
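For orientation, here is a minimal sketch (not part of the commit) of how a cell like the RNNCell added above is driven by fluid.layers.rnn, using the same pad / rnn / unpad pattern the hunks below adopt. The variable x and feat_dim are illustrative assumptions, not names from the repository.

import numpy as np
import paddle.fluid as fluid

feat_dim = 32  # illustrative feature size, not from the commit

# Variable-length input as a level-1 LoDTensor, as used elsewhere in
# model_utils/network.py.
x = fluid.layers.data(
    name='x', shape=[feat_dim], dtype='float32', lod_level=1)

# The RNNCell class defined in the hunk above.
cell = RNNCell(hidden_size=feat_dim, activation=fluid.layers.brelu)

# sequence_pad densifies the batch and returns per-row lengths, which
# sequence_unpad uses afterwards to strip the padding again.
pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
padded, length = fluid.layers.sequence_pad(x, pad_value)

# fluid.layers.rnn unrolls the cell over the time axis (axis 1, since
# time_major=False) and returns (outputs, final_states).
out, _ = fluid.layers.rnn(cell=cell, inputs=padded, time_major=False)
out = fluid.layers.sequence_unpad(x=out, length=length)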
@@ -137,20 +140,32 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
             bias_attr=fluid.ParamAttr(name=name + '_batch_norm_bias'),
             moving_mean_name=name + '_batch_norm_moving_mean',
             moving_variance_name=name + '_batch_norm_moving_variance')
         #forward and backword in time
-        forward_rnn = simple_rnn(
-            input=input_proj_bn,
-            size=size,
-            param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'),
-            is_reverse=False)
-        reverse_rnn = simple_rnn(
-            input=input_proj_bn,
-            size=size,
-            param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'),
-            is_reverse=True)
+        forward_cell = RNNCell(
+            hidden_size=size,
+            activation=fluid.layers.brelu,
+            param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
+            bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'))
+
+        pad_value = fluid.layers.assign(
+            input=np.array([0.0], dtype=np.float32))
+        input, length = fluid.layers.sequence_pad(input_proj_bn, pad_value)
+        forward_rnn, _ = fluid.layers.rnn(
+            cell=forward_cell, inputs=input, time_major=False, is_reverse=False)
+        forward_rnn = fluid.layers.sequence_unpad(x=forward_rnn, length=length)
+
+        reverse_cell = RNNCell(
+            hidden_size=size,
+            activation=fluid.layers.brelu,
+            param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
+            bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'))
+
+        input, length = fluid.layers.sequence_pad(input_proj_bn, pad_value)
+        reverse_rnn, _ = fluid.layers.rnn(
+            cell=reverse_cell,
+            inputs=input,
+            sequence_length=length,
+            time_major=False,
+            is_reverse=True)
+        reverse_rnn = fluid.layers.sequence_unpad(x=reverse_rnn, length=length)
     else:
         input_proj_forward = fluid.layers.fc(
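A detail worth noting in the hunk above: the deleted simple_rnn emulated the backward direction by wrapping a forward scan in two explicit fluid.layers.sequence_reverse calls, whereas the new code hands is_reverse=True and sequence_length to fluid.layers.rnn, which, as I read the API, masks the padded steps so they are not reversed into the front of the recurrence. Continuing the sketch from above (cell, padded and length as defined there):

# Backward-in-time scan over the same padded input; sequence_length keeps
# the reversal aligned to each row's true length rather than to the pad.
rev_out, _ = fluid.layers.rnn(
    cell=cell,
    inputs=padded,
    sequence_length=length,
    time_major=False,
    is_reverse=True)
rev_out = fluid.layers.sequence_unpad(x=rev_out, length=length)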
@@ -183,18 +198,32 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
             moving_mean_name=name + '_reverse_batch_norm_moving_mean',
             moving_variance_name=name + '_reverse_batch_norm_moving_variance')
         # forward and backward in time
-        forward_rnn = simple_rnn(
-            input=input_proj_bn_forward,
-            size=size,
-            param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'),
-            is_reverse=False)
-        reverse_rnn = simple_rnn(
-            input=input_proj_bn_backward,
-            size=size,
-            param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'),
-            is_reverse=True)
+        forward_cell = RNNCell(
+            hidden_size=size,
+            activation=fluid.layers.brelu,
+            param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
+            bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'))
+
+        pad_value = fluid.layers.assign(
+            input=np.array([0.0], dtype=np.float32))
+        input, length = fluid.layers.sequence_pad(input_proj_bn_forward,
+                                                  pad_value)
+        forward_rnn, _ = fluid.layers.rnn(
+            cell=forward_cell, inputs=input, time_major=False, is_reverse=False)
+        forward_rnn = fluid.layers.sequence_unpad(x=forward_rnn, length=length)
+
+        reverse_cell = RNNCell(
+            hidden_size=size,
+            activation=fluid.layers.brelu,
+            param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
+            bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'))
+
+        input, length = fluid.layers.sequence_pad(input_proj_bn_backward,
+                                                  pad_value)
+        reverse_rnn, _ = fluid.layers.rnn(
+            cell=reverse_cell,
+            inputs=input,
+            sequence_length=length,
+            time_major=False,
+            is_reverse=True)
+        reverse_rnn = fluid.layers.sequence_unpad(x=reverse_rnn, length=length)
     out = fluid.layers.concat(input=[forward_rnn, reverse_rnn], axis=1)
     return out
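Finally, a hypothetical call into the updated layer; the variable audio_spec and the sizes are illustrative, not taken from the commit:

# audio_spec stands in for the conv-stack output that DeepSpeech2 feeds
# into this layer; 161 and 1024 are made-up sizes.
audio_spec = fluid.layers.data(
    name='audio_spec', shape=[161], dtype='float32', lod_level=1)
bi_rnn = bidirectional_simple_rnn_bn_layer(
    name='layer_0', input=audio_spec, size=1024, share_weights=False)
# Forward and reverse outputs are concatenated along the feature axis,
# so bi_rnn carries 2 * size features per timestep.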