PaddlePaddle / DeepSpeech
Commit 8172681b
Authored Oct 29, 2019 by lfchener
Change StaticRNN to fluid.layers.rnn.
Parent b86bff11
Showing 1 changed file with 41 additions and 49 deletions

model_utils/network.py  +41 −49
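For orientation before the diff: fluid.layers.StaticRNN builds the recurrence as an explicit per-step loop, while fluid.layers.rnn drives a reusable cell object over a padded batch. A minimal sketch of both styles, assuming Paddle fluid 1.6-era APIs (the function names and the relu activation are illustrative, not taken from the file):

    import numpy as np
    import paddle.fluid as fluid

    def rnn_with_static_rnn(x, size):
        # Old style: explicit step loop; x is time-major [max_len, batch, dim]
        # with equal-length steps.
        rnn = fluid.layers.StaticRNN()
        with rnn.step():
            x_t = rnn.step_input(x)                       # slice one time step
            h_prev = rnn.memory(shape=[-1, size], batch_ref=x_t)
            h = fluid.layers.fc(input=[x_t, h_prev], size=size, act='relu')
            rnn.update_memory(h_prev, h)                  # carry state forward
            rnn.step_output(h)
        return rnn()

    def rnn_with_cell(x, size, cell):
        # New style: pad the LoD sequence, run the cell, unpad the result.
        pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
        padded, length = fluid.layers.sequence_pad(x, pad_value)  # LoD -> [B, T, D]
        out, _ = fluid.layers.rnn(
            cell=cell, inputs=padded, time_major=False, is_reverse=False)
        return fluid.layers.sequence_unpad(x=out, length=length)  # back to LoD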
model_utils/network.py
@@ -60,6 +60,14 @@ def conv_bn_layer(input, filter_size, num_channels_in, num_channels_out, stride,
 class RNNCell(fluid.layers.RNNCell):
+    def __init__(self,
+                 hidden_size,
+                 param_attr=None,
+                 bias_attr=None,
+                 hidden_activation=None,
+                 activation=None,
+                 dtype="float32",
+                 name="RNNCell"):
         '''A simple rnn cell.
         :param hidden_size: Dimension of RNN cells.
         :type hidden_size: int
@@ -76,14 +84,6 @@ class RNNCell(fluid.layers.RNNCell):
         :type name: string
         '''
-    def __init__(self,
-                 hidden_size,
-                 param_attr=None,
-                 bias_attr=None,
-                 hidden_activation=None,
-                 activation=None,
-                 dtype="float32",
-                 name="RNNCell"):
         self.hidden_size = hidden_size
         self.param_attr = param_attr
         self.bias_attr = bias_attr
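The hunk only shows the relocated constructor; the cell's call method lies outside the diff context. For orientation, a fluid.layers.RNNCell subclass supplies call(inputs, states) returning (output, new_state) plus a state_shape property. A hypothetical minimal cell (not the file's exact code) might look like:

    import paddle.fluid as fluid

    class MinimalRNNCell(fluid.layers.RNNCell):
        '''Hypothetical cell computing h_t = act(W [x_t, h_{t-1}] + b).'''

        def __init__(self, hidden_size, activation=None):
            self.hidden_size = hidden_size
            self.activation = activation or fluid.layers.tanh

        def call(self, inputs, states):
            # fc over [x_t, h_{t-1}] realizes the input and recurrent projections
            new_hidden = fluid.layers.fc(
                input=[inputs, states], size=self.hidden_size, act=None)
            new_hidden = self.activation(new_hidden)
            # output and new state coincide for a vanilla RNN
            return new_hidden, new_hidden

        @property
        def state_shape(self):
            # batch dimension is implicit; one state vector of width hidden_size
            return [self.hidden_size]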
@@ -123,6 +123,20 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
     :return: Bidirectional simple rnn layer.
     :rtype: Variable
     """
+    forward_cell = RNNCell(
+        hidden_size=size,
+        activation=fluid.layers.brelu,
+        param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
+        bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'))
+    reverse_cell = RNNCell(
+        hidden_size=size,
+        activation=fluid.layers.brelu,
+        param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
+        bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'))
+    pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
     if share_weights:
         #input-hidden weights shared between bi-directional rnn.
         input_proj = fluid.layers.fc(
@@ -141,24 +155,12 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
             moving_mean_name=name + '_batch_norm_moving_mean',
             moving_variance_name=name + '_batch_norm_moving_variance')
         #forward and backword in time
-        forward_cell = RNNCell(
-            hidden_size=size,
-            activation=fluid.layers.brelu,
-            param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'))
-        pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
         input, length = fluid.layers.sequence_pad(input_proj_bn, pad_value)
         forward_rnn, _ = fluid.layers.rnn(
             cell=forward_cell, inputs=input, time_major=False, is_reverse=False)
         forward_rnn = fluid.layers.sequence_unpad(x=forward_rnn, length=length)
-        reverse_cell = RNNCell(
-            hidden_size=size,
-            activation=fluid.layers.brelu,
-            param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'))
-        input, length = fluid.layers.sequence_pad(input_proj_bn, pad_value)
         reverse_rnn, _ = fluid.layers.rnn(
             cell=reverse_cell,
             inputs=input,
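Since fluid.layers.rnn consumes dense batch-major tensors while the rest of this network passes LoD sequences, every call is bracketed by sequence_pad/sequence_unpad. A comment-level illustration of those semantics (example values are illustrative):

    # Two sequences of lengths 3 and 1, feature width 1, as one LoD batch:
    #   seq (LoD [[3, 1]]) = [[1], [2], [3], [4]]
    # sequence_pad(seq, pad_value=0) yields a dense tensor plus the lengths:
    #   padded = [[[1], [2], [3]],
    #             [[4], [0], [0]]]     # shape [batch=2, max_len=3, 1]
    #   length = [3, 1]
    # sequence_unpad(padded, length) drops the pad steps and restores the LoD
    # layout, so downstream sequence ops keep working unchanged.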
@@ -174,7 +176,7 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
             act=None,
             param_attr=fluid.ParamAttr(name=name + '_forward_fc_weight'),
             bias_attr=False)
-        input_proj_backward = fluid.layers.fc(
+        input_proj_reverse = fluid.layers.fc(
             input=input,
             size=size,
             act=None,
@@ -189,8 +191,8 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
             bias_attr=fluid.ParamAttr(name=name + '_forward_batch_norm_bias'),
             moving_mean_name=name + '_forward_batch_norm_moving_mean',
             moving_variance_name=name + '_forward_batch_norm_moving_variance')
-        input_proj_bn_backward = fluid.layers.batch_norm(
-            input=input_proj_backward,
+        input_proj_bn_reverse = fluid.layers.batch_norm(
+            input=input_proj_reverse,
             act=None,
             param_attr=fluid.ParamAttr(
                 name=name + '_reverse_batch_norm_weight'),
@@ -198,24 +200,14 @@ def bidirectional_simple_rnn_bn_layer(name, input, size, share_weights):
             moving_mean_name=name + '_reverse_batch_norm_moving_mean',
             moving_variance_name=name + '_reverse_batch_norm_moving_variance')
         # forward and backward in time
-        forward_cell = RNNCell(
-            hidden_size=size,
-            activation=fluid.layers.brelu,
-            param_attr=fluid.ParamAttr(name=name + '_forward_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_forward_rnn_bias'))
-        pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))
-        input, length = fluid.layers.sequence_pad(input_proj_bn, pad_value)
+        input, length = fluid.layers.sequence_pad(input_proj_bn_forward,
+                                                  pad_value)
         forward_rnn, _ = fluid.layers.rnn(
             cell=forward_cell, inputs=input, time_major=False, is_reverse=False)
         forward_rnn = fluid.layers.sequence_unpad(x=forward_rnn, length=length)
-        reverse_cell = RNNCell(
-            hidden_size=size,
-            activation=fluid.layers.brelu,
-            param_attr=fluid.ParamAttr(name=name + '_reverse_rnn_weight'),
-            bias_attr=fluid.ParamAttr(name=name + '_reverse_rnn_bias'))
-        input, length = fluid.layers.sequence_pad(input_proj_bn, pad_value)
+        input, length = fluid.layers.sequence_pad(input_proj_bn_reverse,
+                                                  pad_value)
         reverse_rnn, _ = fluid.layers.rnn(
             cell=reverse_cell,
             inputs=input,
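Note that in this weight-unshared branch the removed lines padded input_proj_bn, a variable defined only under share_weights; the new lines pad input_proj_bn_forward and input_proj_bn_reverse instead, so the rewrite appears to fix a latent name error as well. The hunk ends mid-call; by analogy with the forward pass, the reverse call presumably continues along these lines (a hedged sketch, not lines from the diff):

    reverse_rnn, _ = fluid.layers.rnn(
        cell=reverse_cell,
        inputs=input,
        time_major=False,
        is_reverse=True)  # assumed: mirrors the forward call, direction reversed
    reverse_rnn = fluid.layers.sequence_unpad(x=reverse_rnn, length=length)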
@@ -248,7 +240,7 @@ def bidirectional_gru_bn_layer(name, input, size, act):
         act=None,
         param_attr=fluid.ParamAttr(name=name + '_forward_fc_weight'),
         bias_attr=False)
-    input_proj_backward = fluid.layers.fc(
+    input_proj_reverse = fluid.layers.fc(
         input=input,
         size=size * 3,
         act=None,
@@ -262,8 +254,8 @@ def bidirectional_gru_bn_layer(name, input, size, act):
         bias_attr=fluid.ParamAttr(name=name + '_forward_batch_norm_bias'),
         moving_mean_name=name + '_forward_batch_norm_moving_mean',
         moving_variance_name=name + '_forward_batch_norm_moving_variance')
-    input_proj_bn_backward = fluid.layers.batch_norm(
-        input=input_proj_backward,
+    input_proj_bn_reverse = fluid.layers.batch_norm(
+        input=input_proj_reverse,
         act=None,
         param_attr=fluid.ParamAttr(name=name + '_reverse_batch_norm_weight'),
         bias_attr=fluid.ParamAttr(name=name + '_reverse_batch_norm_bias'),
@@ -279,7 +271,7 @@ def bidirectional_gru_bn_layer(name, input, size, act):
         bias_attr=fluid.ParamAttr(name=name + '_forward_gru_bias'),
         is_reverse=False)
     reverse_gru = fluid.layers.dynamic_gru(
-        input=input_proj_bn_backward,
+        input=input_proj_bn_reverse,
         size=size,
         gate_activation='sigmoid',
         candidate_activation=act,
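The diff stops before the function tail. For completeness, a bidirectional layer of this shape typically joins the two directional outputs along the feature axis; a hedged sketch (forward_gru and reverse_gru are reused from the hunk, but the concat line itself is not shown in this diff):

    # assumed tail: concatenate forward/reverse GRU outputs feature-wise
    out = fluid.layers.concat(input=[forward_gru, reverse_gru], axis=1)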