From 35bc71701489aa3bfbde7462673b8f594af8b239 Mon Sep 17 00:00:00 2001
From: Xinghai Sun
Date: Sun, 3 Sep 2017 17:24:04 +0800
Subject: [PATCH] Fixed a bug of mixing forward and backward projection in
 bi-directional GRUs.

---
 deep_speech_2/layer.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/deep_speech_2/layer.py b/deep_speech_2/layer.py
index c4055aaa..8fec0eea 100644
--- a/deep_speech_2/layer.py
+++ b/deep_speech_2/layer.py
@@ -84,19 +84,26 @@ def bidirectional_gru_bn_layer(name, input, size, act):
     :rtype: LayerOutput
     """
     # input-hidden weights shared across bi-direcitonal rnn.
-    input_proj = paddle.layer.fc(
+    input_proj_forward = paddle.layer.fc(
+        input=input,
+        size=size * 3,
+        act=paddle.activation.Linear(),
+        bias_attr=False)
+    input_proj_backward = paddle.layer.fc(
         input=input,
         size=size * 3,
         act=paddle.activation.Linear(),
         bias_attr=False)
     # batch norm is only performed on input-state projection
-    input_proj_bn = paddle.layer.batch_norm(
-        input=input_proj, act=paddle.activation.Linear())
+    input_proj_bn_forward = paddle.layer.batch_norm(
+        input=input_proj_forward, act=paddle.activation.Linear())
+    input_proj_bn_backward = paddle.layer.batch_norm(
+        input=input_proj_backward, act=paddle.activation.Linear())
     # forward and backward in time
     forward_gru = paddle.layer.grumemory(
-        input=input_proj_bn, act=act, reverse=False)
+        input=input_proj_bn_forward, act=act, reverse=False)
     backward_gru = paddle.layer.grumemory(
-        input=input_proj_bn, act=act, reverse=True)
+        input=input_proj_bn_backward, act=act, reverse=True)
     return paddle.layer.concat(input=[forward_gru, backward_gru])
--
GitLab
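
For context, a minimal sketch of the helper after this patch, reconstructed from the hunk above. The `import paddle.v2 as paddle` line and the docstring wording outside the hunk are assumptions, not part of the patch. The point of the fix is that each direction now has its own input-hidden projection and its own batch-norm layer, so the forward and backward GRUs no longer share projection weights or normalization statistics.

    # Sketch only: assumes the module imports the v2 API as `paddle`,
    # as the calls paddle.layer.* / paddle.activation.* in the patch suggest.
    import paddle.v2 as paddle

    def bidirectional_gru_bn_layer(name, input, size, act):
        """Bidirectional GRU with batch normalization applied to the
        input-to-hidden projections (docstring paraphrased).

        :rtype: LayerOutput
        """
        # One input-hidden projection per direction (the bug fix): the two
        # GRUs previously shared a single projection.
        input_proj_forward = paddle.layer.fc(
            input=input,
            size=size * 3,
            act=paddle.activation.Linear(),
            bias_attr=False)
        input_proj_backward = paddle.layer.fc(
            input=input,
            size=size * 3,
            act=paddle.activation.Linear(),
            bias_attr=False)
        # Batch norm is performed only on the input-state projections,
        # each direction with its own statistics.
        input_proj_bn_forward = paddle.layer.batch_norm(
            input=input_proj_forward, act=paddle.activation.Linear())
        input_proj_bn_backward = paddle.layer.batch_norm(
            input=input_proj_backward, act=paddle.activation.Linear())
        # Run the GRU forward and backward in time, then concatenate.
        forward_gru = paddle.layer.grumemory(
            input=input_proj_bn_forward, act=act, reverse=False)
        backward_gru = paddle.layer.grumemory(
            input=input_proj_bn_backward, act=act, reverse=True)
        return paddle.layer.concat(input=[forward_gru, backward_gru])

Note that, as the in-code comment in the patch states, batch norm is applied only to the input-state projections; the recurrent state path inside grumemory stays unnormalized.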